diff --git a/api/go.mod b/api/go.mod
index c9c73feb6c..f190b495a2 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -3,7 +3,7 @@ module github.com/openshift/hypershift/api
go 1.23.0
require (
- github.com/openshift/api v0.0.0-20250124212313-a770960d61e0
+ github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f
k8s.io/api v0.32.2
k8s.io/apimachinery v0.32.2
)
@@ -25,7 +25,7 @@ require (
golang.org/x/text v0.21.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+ k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
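
The only substantive change in this module is the dependency bump above: github.com/openshift/api advances from the 2025-01-24 pseudo-version to 2025-02-13, and k8s.io/utils moves forward as an indirect dependency. The api/go.sum, api/vendor/, and regenerated client hunks below are mechanical fallout of that bump, of the kind produced by running `go get github.com/openshift/api@f5b09d13c01f` followed by `go mod tidy` and `go mod vendor` inside api/ (the exact invocation is an assumption; the diff does not record it).
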
diff --git a/api/go.sum b/api/go.sum
index 09358e12b2..75cae5d11e 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -28,8 +28,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0 h1:dCvNfygMrPLVNQ06bpHXrxKfrXHiprO4+etHrRUqI8g=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f h1:HH+BqfxONWB2kknhCLAjiAievsyb56WvLGXdNOseI8g=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -87,8 +87,7 @@ k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ=
k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
diff --git a/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index 4a6823640d..a447adb9f4 100644
--- a/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -53,6 +53,8 @@ type ClusterOperatorStatus struct {
// conditions describes the state of the operator's managed and monitored components.
// +patchMergeKey=type
// +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
// +optional
Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
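
The two added markers declare conditions an associative list keyed by type. Below is a minimal sketch of the same pattern on a hypothetical WidgetStatus type (not from either repository): controller-gen renders +listType=map and +listMapKey=type into the x-kubernetes-list-type: map and x-kubernetes-list-map-keys schema extensions, so server-side apply merges condition entries by their type key instead of replacing the whole slice.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WidgetStatus is illustrative only. The marker comments below are consumed
// by controller-gen at CRD-generation time; they produce the
// x-kubernetes-list-type: map and x-kubernetes-list-map-keys: ["type"]
// schema extensions, which let two field managers each own their own
// condition types under server-side apply.
type WidgetStatus struct {
	// +patchMergeKey=type
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=type
	// +optional
	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
}
```
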
diff --git a/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index 0293603d78..21fe0e13e9 100644
--- a/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -528,18 +528,22 @@ type AWSPlatformStatus struct {
// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
type AWSResourceTag struct {
- // key is the key of the tag
+ // key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag.
+ // Key should consist of between 1 and 128 characters, and may
+ // contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=128
- // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag key. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'"
// +required
Key string `json:"key"`
- // value is the value of the tag.
+ // value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag.
+ // Value should consist of between 1 and 256 characters, and may
+ // contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.
// Some AWS service do not support empty values. Since tags are added to resources in many services, the
// length of the tag value must meet the requirements of all services.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
- // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag value. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'"
// +required
Value string `json:"value"`
}
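
This hunk widens the allowed character set (a space joins the class) and swaps the Pattern marker for a CEL XValidation rule, which can carry the human-readable failure message that Pattern cannot. CEL's matches() uses RE2, the same engine behind Go's regexp package, so the new rule can be exercised locally; a small sketch with invented sample keys:

```go
package main

import (
	"fmt"
	"regexp"
)

// tagRe mirrors the new CEL rule self.matches('^[0-9A-Za-z_.:/=+-@ ]+$').
// CEL's matches() is RE2-based, as is Go's regexp package, so this compiles
// to the same automaton. The trailing space in the class is the one character
// the old +kubebuilder:validation:Pattern marker did not accept.
var tagRe = regexp.MustCompile(`^[0-9A-Za-z_.:/=+-@ ]+$`)

func main() {
	for _, key := range []string{
		"kubernetes.io/cluster/example", // allowed before and after
		"cost center",                   // space: newly allowed
		"bad\ttab",                      // control characters: still rejected
	} {
		fmt.Printf("%-32q -> %v\n", key, tagRe.MatchString(key))
	}
}
```
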
diff --git a/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index 0ac9c7ccd2..cbe5cdd80f 100644
--- a/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -1198,8 +1198,8 @@ func (AWSPlatformStatus) SwaggerDoc() map[string]string {
var map_AWSResourceTag = map[string]string{
"": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.",
- "key": "key is the key of the tag",
- "value": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.",
+ "key": "key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. Key should consist of between 1 and 128 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.",
+ "value": "value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag. Value should consist of between 1 and 256 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.",
}
func (AWSResourceTag) SwaggerDoc() map[string]string {
diff --git a/api/vendor/modules.txt b/api/vendor/modules.txt
index d343d2dce8..0abd0aa28b 100644
--- a/api/vendor/modules.txt
+++ b/api/vendor/modules.txt
@@ -23,7 +23,7 @@ github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.2
## explicit; go 1.12
github.com/modern-go/reflect2
-# github.com/openshift/api v0.0.0-20250124212313-a770960d61e0
+# github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f
## explicit; go 1.23.0
github.com/openshift/api/config/v1
# github.com/rogpeppe/go-internal v1.13.1
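
Everything from here down is code-generator output reacting to the vendored API bump, and two patterns repeat. Applyconfiguration files gain an aliased import of an applyconfiguration package (as rendered, several of these aliases point at the importing file's own package path, which would be a self-import in Go; presumably an artifact of the generator invocation), and typed/fake clients drop stdlib imports such as "context", "fmt", "net/http", and "time", consistent with a newer k8s.io/code-generator consolidating that plumbing into shared helpers (an inference; the generator version is not recorded here). The consuming surface stays the same; a sketch of how the regenerated builders are used, with an invented resource name and field manager:

```go
package example

import (
	"context"

	hcapply "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
	clientset "github.com/openshift/hypershift/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyHostedCluster shows the generated builder pattern: Type(name, ns)
// constructs the apply configuration (name, namespace, kind, and apiVersion
// pre-filled) and the typed client's Apply performs a server-side apply
// owned by the given field manager.
func applyHostedCluster(ctx context.Context, cs clientset.Interface) error {
	hc := hcapply.HostedCluster("example", "clusters")
	_, err := cs.HypershiftV1beta1().HostedClusters("clusters").
		Apply(ctx, hc, metav1.ApplyOptions{FieldManager: "docs-example"})
	return err
}
```
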
diff --git a/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go
index 5f1639b073..2444ac3dbf 100644
--- a/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go
+++ b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1alpha1
import (
+ v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/autonode.go b/client/applyconfiguration/hypershift/v1beta1/autonode.go
index 374b390290..7b5cf06808 100644
--- a/client/applyconfiguration/hypershift/v1beta1/autonode.go
+++ b/client/applyconfiguration/hypershift/v1beta1/autonode.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AutoNodeApplyConfiguration represents an declarative configuration of the AutoNode type for use
// with apply.
type AutoNodeApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go b/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go
index 4672ab1c22..bf35cabf73 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AWSCloudProviderConfigApplyConfiguration represents an declarative configuration of the AWSCloudProviderConfig type for use
// with apply.
type AWSCloudProviderConfigApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go b/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go
index f728155d44..a8f49f1078 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AWSKMSSpecApplyConfiguration represents an declarative configuration of the AWSKMSSpec type for use
// with apply.
type AWSKMSSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go
index d9caedfabf..8426f503af 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AWSNodePoolPlatformApplyConfiguration represents an declarative configuration of the AWSNodePoolPlatform type for use
// with apply.
type AWSNodePoolPlatformApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go
index cc11a94976..37def802db 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// AWSPlatformSpecApplyConfiguration represents an declarative configuration of the AWSPlatformSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go b/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go
index f67e8456d2..f07cbf7627 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AWSResourceReferenceApplyConfiguration represents an declarative configuration of the AWSResourceReference type for use
// with apply.
type AWSResourceReferenceApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go b/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go
index 70a0a57507..82a5b74595 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AWSSharedVPCApplyConfiguration represents an declarative configuration of the AWSSharedVPC type for use
// with apply.
type AWSSharedVPCApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go b/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go
index 9a0f54940c..e8e7a2e098 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AzureKMSSpecApplyConfiguration represents an declarative configuration of the AzureKMSSpec type for use
// with apply.
type AzureKMSSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go
index 602e5897c5..f989c3b447 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AzureNodePoolPlatformApplyConfiguration represents an declarative configuration of the AzureNodePoolPlatform type for use
// with apply.
type AzureNodePoolPlatformApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go
index f1b63cec7c..a7e73e97e2 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AzurePlatformSpecApplyConfiguration represents an declarative configuration of the AzurePlatformSpec type for use
// with apply.
type AzurePlatformSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go b/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go
index 15fa0ebc6a..7d89cf0f29 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// AzureResourceManagedIdentitiesApplyConfiguration represents an declarative configuration of the AzureResourceManagedIdentities type for use
// with apply.
type AzureResourceManagedIdentitiesApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go b/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go
index 3cbf2cfaf8..817eee831d 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// AzureVMImageApplyConfiguration represents an declarative configuration of the AzureVMImage type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go b/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go
index 5051f97cdc..e9ed686263 100644
--- a/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go
+++ b/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// ClusterNetworkingApplyConfiguration represents an declarative configuration of the ClusterNetworking type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go b/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go
index 421ca49b47..5c91d343ea 100644
--- a/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go
+++ b/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// ControlPlaneManagedIdentitiesApplyConfiguration represents an declarative configuration of the ControlPlaneManagedIdentities type for use
// with apply.
type ControlPlaneManagedIdentitiesApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/diagnostics.go b/client/applyconfiguration/hypershift/v1beta1/diagnostics.go
index d7c6a94285..df5b402351 100644
--- a/client/applyconfiguration/hypershift/v1beta1/diagnostics.go
+++ b/client/applyconfiguration/hypershift/v1beta1/diagnostics.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// DiagnosticsApplyConfiguration represents an declarative configuration of the Diagnostics type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/etcdspec.go b/client/applyconfiguration/hypershift/v1beta1/etcdspec.go
index 840435e400..4d0cdb156e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/etcdspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/etcdspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// EtcdSpecApplyConfiguration represents an declarative configuration of the EtcdSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go b/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go
index 65f3904709..0ff601ceed 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1beta1
import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go b/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go
index ba18e9adac..3a4b6eb311 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go
@@ -20,6 +20,7 @@ package v1beta1
import (
v1 "github.com/openshift/api/config/v1"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
corev1 "k8s.io/api/core/v1"
)
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go b/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go
index 433804746c..c2ad61336f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go
index dfd2ecc322..5bffb5bfa8 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1beta1
import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go
index 5b2e7836e3..810211ff1f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go
@@ -20,6 +20,7 @@ package v1beta1
import (
v1 "github.com/openshift/api/config/v1"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
corev1 "k8s.io/api/core/v1"
)
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go
index bd78944b3c..2835183942 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1beta1
import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go
index e9aaa9d44f..6f843362ea 100644
--- a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// IBMCloudKMSAuthSpecApplyConfiguration represents an declarative configuration of the IBMCloudKMSAuthSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go
index 90ca5a990c..7f65476618 100644
--- a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// IBMCloudKMSSpecApplyConfiguration represents an declarative configuration of the IBMCloudKMSSpec type for use
// with apply.
type IBMCloudKMSSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go b/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go
index cc2c078a92..bf35f79d1e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// KarpenterConfigApplyConfiguration represents an declarative configuration of the KarpenterConfig type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kmsspec.go b/client/applyconfiguration/hypershift/v1beta1/kmsspec.go
index eece69f45e..33a5cf0543 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kmsspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// KMSSpecApplyConfiguration represents an declarative configuration of the KMSSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go
index 5176990201..7b0328b0a5 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// KubevirtManualStorageDriverConfigApplyConfiguration represents an declarative configuration of the KubevirtManualStorageDriverConfig type for use
// with apply.
type KubevirtManualStorageDriverConfigApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go
index 88d6679275..1caac99872 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// KubevirtNodePoolPlatformApplyConfiguration represents an declarative configuration of the KubevirtNodePoolPlatform type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go
index bf1f45e965..e02897816b 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// KubeVirtNodePoolStatusApplyConfiguration represents an declarative configuration of the KubeVirtNodePoolStatus type for use
// with apply.
type KubeVirtNodePoolStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go
index b0c8ad7962..8a93b3ae98 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// KubevirtPlatformCredentialsApplyConfiguration represents an declarative configuration of the KubevirtPlatformCredentials type for use
// with apply.
type KubevirtPlatformCredentialsApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go
index 3c5714926a..ea21de4a30 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// KubevirtPlatformSpecApplyConfiguration represents an declarative configuration of the KubevirtPlatformSpec type for use
// with apply.
type KubevirtPlatformSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go
index bda9105ba8..2802a493b9 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// KubevirtRootVolumeApplyConfiguration represents an declarative configuration of the KubevirtRootVolume type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go
index 974490912d..8162f08702 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// KubevirtStorageDriverSpecApplyConfiguration represents an declarative configuration of the KubevirtStorageDriverSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go
index 78fa0dcedb..6c0b485aba 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// KubevirtVolumeApplyConfiguration represents an declarative configuration of the KubevirtVolume type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go b/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go
index c233269317..8b5cfad9f9 100644
--- a/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// ManagedEtcdSpecApplyConfiguration represents an declarative configuration of the ManagedEtcdSpec type for use
// with apply.
type ManagedEtcdSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go b/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go
index 8c632ef1f7..60885da80e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// ManagedEtcdStorageSpecApplyConfiguration represents an declarative configuration of the ManagedEtcdStorageSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/networkfilter.go b/client/applyconfiguration/hypershift/v1beta1/networkfilter.go
index 6b9b3679d1..525275c05a 100644
--- a/client/applyconfiguration/hypershift/v1beta1/networkfilter.go
+++ b/client/applyconfiguration/hypershift/v1beta1/networkfilter.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// NetworkFilterApplyConfiguration represents an declarative configuration of the NetworkFilter type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/networkparam.go b/client/applyconfiguration/hypershift/v1beta1/networkparam.go
index d859ec4cb5..cf80f73afe 100644
--- a/client/applyconfiguration/hypershift/v1beta1/networkparam.go
+++ b/client/applyconfiguration/hypershift/v1beta1/networkparam.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// NetworkParamApplyConfiguration represents an declarative configuration of the NetworkParam type for use
// with apply.
type NetworkParamApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepool.go b/client/applyconfiguration/hypershift/v1beta1/nodepool.go
index 666d301d68..6fd3c768d5 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepool.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepool.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1beta1
import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go
index a23475b359..46335069d8 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// NodePoolManagementApplyConfiguration represents an declarative configuration of the NodePoolManagement type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go
index a6bec424f4..83b5829bc4 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// NodePoolPlatformApplyConfiguration represents an declarative configuration of the NodePoolPlatform type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go
index ae60c60f26..848f464391 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// NodePoolPlatformStatusApplyConfiguration represents an declarative configuration of the NodePoolPlatformStatus type for use
// with apply.
type NodePoolPlatformStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go
index 7483baeb2a..ca49fc92b6 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1beta1
import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go
index e47b7fac6d..dd196ed612 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// NodePoolStatusApplyConfiguration represents an declarative configuration of the NodePoolStatus type for use
// with apply.
type NodePoolStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go
index 98ee398f0d..c65bdb6b4e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// OpenStackNodePoolPlatformApplyConfiguration represents an declarative configuration of the OpenStackNodePoolPlatform type for use
// with apply.
type OpenStackNodePoolPlatformApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go
index 66ba3c8ae3..2d55c84405 100644
--- a/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// OpenStackPlatformSpecApplyConfiguration represents an declarative configuration of the OpenStackPlatformSpec type for use
// with apply.
type OpenStackPlatformSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/platformspec.go b/client/applyconfiguration/hypershift/v1beta1/platformspec.go
index a5213aa0e4..170941258d 100644
--- a/client/applyconfiguration/hypershift/v1beta1/platformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/platformspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// PlatformSpecApplyConfiguration represents an declarative configuration of the PlatformSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/platformstatus.go b/client/applyconfiguration/hypershift/v1beta1/platformstatus.go
index d56dc2979e..25a20590eb 100644
--- a/client/applyconfiguration/hypershift/v1beta1/platformstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/platformstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// PlatformStatusApplyConfiguration represents an declarative configuration of the PlatformStatus type for use
// with apply.
type PlatformStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/portspec.go b/client/applyconfiguration/hypershift/v1beta1/portspec.go
index 8b3b6b2b5a..5a5a44ff4f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/portspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/portspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// PortSpecApplyConfiguration represents an declarative configuration of the PortSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go
index 1e248bfe5e..c2e9223ad7 100644
--- a/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
diff --git a/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go
index 8047a8e89c..9e924fe431 100644
--- a/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1beta1
import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
v1 "k8s.io/api/core/v1"
)
diff --git a/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go b/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go
index 7d972793ff..7864341b04 100644
--- a/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// ProvisionerConfigApplyConfiguration represents an declarative configuration of the ProvisionerConfig type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go b/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go
index 4411fe53f2..cff2bc40ca 100644
--- a/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go
+++ b/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// ReplaceUpgradeApplyConfiguration represents an declarative configuration of the ReplaceUpgrade type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/routerfilter.go b/client/applyconfiguration/hypershift/v1beta1/routerfilter.go
index c3a52f2777..5b2b982581 100644
--- a/client/applyconfiguration/hypershift/v1beta1/routerfilter.go
+++ b/client/applyconfiguration/hypershift/v1beta1/routerfilter.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// RouterFilterApplyConfiguration represents an declarative configuration of the RouterFilter type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/routerparam.go b/client/applyconfiguration/hypershift/v1beta1/routerparam.go
index 1d3469c20c..d834ae574f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/routerparam.go
+++ b/client/applyconfiguration/hypershift/v1beta1/routerparam.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// RouterParamApplyConfiguration represents an declarative configuration of the RouterParam type for use
// with apply.
type RouterParamApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go b/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go
index 558e494826..fb3068fefa 100644
--- a/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// SecretEncryptionSpecApplyConfiguration represents an declarative configuration of the SecretEncryptionSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go
index 6f47aa7726..e3003802d5 100644
--- a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go
+++ b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// ServicePublishingStrategyApplyConfiguration represents an declarative configuration of the ServicePublishingStrategy type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go
index 22bfe60303..e2dad876e8 100644
--- a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go
+++ b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go
@@ -19,6 +19,7 @@ package v1beta1
import (
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// ServicePublishingStrategyMappingApplyConfiguration represents an declarative configuration of the ServicePublishingStrategyMapping type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go b/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go
index 893dc3e324..8abfc7745b 100644
--- a/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go
+++ b/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go
@@ -19,6 +19,7 @@ package v1beta1
import (
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)
// SubnetFilterApplyConfiguration represents an declarative configuration of the SubnetFilter type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/subnetparam.go b/client/applyconfiguration/hypershift/v1beta1/subnetparam.go
index 6754f748da..5fa2173ee0 100644
--- a/client/applyconfiguration/hypershift/v1beta1/subnetparam.go
+++ b/client/applyconfiguration/hypershift/v1beta1/subnetparam.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// SubnetParamApplyConfiguration represents an declarative configuration of the SubnetParam type for use
// with apply.
type SubnetParamApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/subnetspec.go b/client/applyconfiguration/hypershift/v1beta1/subnetspec.go
index 016c4cd98e..6408f73890 100644
--- a/client/applyconfiguration/hypershift/v1beta1/subnetspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/subnetspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// SubnetSpecApplyConfiguration represents an declarative configuration of the SubnetSpec type for use
// with apply.
type SubnetSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go b/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go
index 09f0acf3c7..84f51a30ec 100644
--- a/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1beta1
+import (
+ v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
// UnmanagedEtcdSpecApplyConfiguration represents an declarative configuration of the UnmanagedEtcdSpec type for use
// with apply.
type UnmanagedEtcdSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/internal/internal.go b/client/applyconfiguration/internal/internal.go
index 765bec46ab..289f7f35f4 100644
--- a/client/applyconfiguration/internal/internal.go
+++ b/client/applyconfiguration/internal/internal.go
@@ -18,9 +18,6 @@ limitations under the License.
package internal
import (
- "fmt"
- "sync"
-
typed "sigs.k8s.io/structured-merge-diff/v4/typed"
)
diff --git a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go
index 5b1f541d83..42feefa671 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1alpha1
import (
+ v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go
index deb0fd83af..0afb04d962 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1alpha1
import (
+ v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
resource "k8s.io/apimachinery/pkg/api/resource"
)
diff --git a/client/applyconfiguration/scheduling/v1alpha1/effects.go b/client/applyconfiguration/scheduling/v1alpha1/effects.go
index ba77d9bdaa..066fcf9b12 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/effects.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/effects.go
@@ -18,6 +18,7 @@ limitations under the License.
package v1alpha1
import (
+ v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go b/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go
index 0928fbf153..4efe73a8c0 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go
@@ -17,6 +17,10 @@ limitations under the License.
package v1alpha1
+import (
+ v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
+)
+
// SizeConfigurationApplyConfiguration represents an declarative configuration of the SizeConfiguration type for use
// with apply.
type SizeConfigurationApplyConfiguration struct {
diff --git a/client/clientset/clientset/clientset.go b/client/clientset/clientset/clientset.go
index 8e910c9216..c2d8b72351 100644
--- a/client/clientset/clientset/clientset.go
+++ b/client/clientset/clientset/clientset.go
@@ -18,9 +18,6 @@ limitations under the License.
package clientset
import (
- "fmt"
- "net/http"
-
certificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1"
schedulingv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/scheduling/v1alpha1"
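
The typed and fake client hunks that follow only shed now-unused imports; the request paths they expose are unchanged. A hedged smoke test for the regenerated read path, assuming a local kubeconfig and a "clusters" namespace:

```go
package example

import (
	"context"
	"fmt"

	clientset "github.com/openshift/hypershift/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

// listHostedClusters builds the generated clientset from a local kubeconfig
// and lists HostedClusters, touching the same typed-client code these hunks
// regenerate.
func listHostedClusters() error {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		return err
	}
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		return err
	}
	list, err := cs.HypershiftV1beta1().HostedClusters("clusters").
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, hc := range list.Items {
		fmt.Println(hc.Name)
	}
	return nil
}
```
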
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go
index b241c32c89..ffb87ce82f 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go
@@ -18,10 +18,7 @@ limitations under the License.
package v1alpha1
import (
- "context"
json "encoding/json"
- "fmt"
- "time"
v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go
index 6fca6b0654..a95f41b4b5 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go
@@ -18,8 +18,6 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
-
v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
"github.com/openshift/hypershift/client/clientset/clientset/scheme"
rest "k8s.io/client-go/rest"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go
index e71feed455..09e3d57c71 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go
@@ -18,10 +18,7 @@ limitations under the License.
package v1alpha1
import (
- "context"
json "encoding/json"
- "fmt"
- "time"
v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go
index da045ce90f..19f9e1b272 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go
@@ -18,9 +18,7 @@ limitations under the License.
package fake
import (
- "context"
json "encoding/json"
- "fmt"
v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go
index fd365c96c3..b36f51d26c 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go
@@ -18,9 +18,7 @@ limitations under the License.
package fake
import (
- "context"
json "encoding/json"
- "fmt"
v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/certificatesigningrequestapproval.go b/client/clientset/clientset/typed/hypershift/v1beta1/certificatesigningrequestapproval.go
index 313ecdd31c..d6b0359822 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/certificatesigningrequestapproval.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/certificatesigningrequestapproval.go
@@ -18,10 +18,7 @@ limitations under the License.
package v1beta1
import (
- "context"
json "encoding/json"
- "fmt"
- "time"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go
index 6d9e62e34d..85b369353c 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go
@@ -18,9 +18,7 @@ limitations under the License.
package fake
import (
- "context"
json "encoding/json"
- "fmt"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go
index 569c9acf28..238d75fd84 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go
@@ -18,9 +18,7 @@ limitations under the License.
package fake
import (
- "context"
json "encoding/json"
- "fmt"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go
index 2f381b4d3a..0d2fa9253b 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go
@@ -18,9 +18,7 @@ limitations under the License.
package fake
import (
- "context"
json "encoding/json"
- "fmt"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go
index 4d15f10169..39db40392b 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go
@@ -18,9 +18,7 @@ limitations under the License.
package fake
import (
- "context"
json "encoding/json"
- "fmt"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go
index e4eff190c0..8886f8249a 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go
@@ -18,10 +18,7 @@ limitations under the License.
package v1beta1
import (
- "context"
json "encoding/json"
- "fmt"
- "time"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go
index d838e1e960..966223dd6a 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go
@@ -18,10 +18,7 @@ limitations under the License.
package v1beta1
import (
- "context"
json "encoding/json"
- "fmt"
- "time"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go b/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go
index 7ce2da0f94..891bd63f4a 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go
@@ -18,8 +18,6 @@ limitations under the License.
package v1beta1
import (
- "net/http"
-
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
"github.com/openshift/hypershift/client/clientset/clientset/scheme"
rest "k8s.io/client-go/rest"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go b/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go
index d8dc3b5f9e..6653c7a7a3 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go
@@ -18,10 +18,7 @@ limitations under the License.
package v1beta1
import (
- "context"
json "encoding/json"
- "fmt"
- "time"
v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go b/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go
index 85f707895e..1d66ed9d8d 100644
--- a/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go
+++ b/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go
@@ -18,10 +18,7 @@ limitations under the License.
package v1alpha1
import (
- "context"
json "encoding/json"
- "fmt"
- "time"
v1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
schedulingv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
diff --git a/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go b/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go
index 1ad988f887..dd0e845d70 100644
--- a/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go
+++ b/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go
@@ -18,9 +18,7 @@ limitations under the License.
package fake
import (
- "context"
json "encoding/json"
- "fmt"
v1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
schedulingv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
diff --git a/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go b/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go
index 91bf604baa..878aa7fb13 100644
--- a/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go
+++ b/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go
@@ -18,8 +18,6 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
-
v1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
"github.com/openshift/hypershift/client/clientset/clientset/scheme"
rest "k8s.io/client-go/rest"
diff --git a/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go b/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go
index b79ab1e28e..c03a3cc344 100644
--- a/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go
+++ b/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go
@@ -18,7 +18,6 @@ limitations under the License.
package v1alpha1
import (
- "context"
time "time"
certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
diff --git a/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go
index 98eda861a8..0a2df613db 100644
--- a/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go
+++ b/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go
@@ -18,7 +18,6 @@ limitations under the License.
package v1alpha1
import (
- "context"
time "time"
certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go b/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go
index bd82a48460..5d74c45312 100644
--- a/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go
+++ b/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go
@@ -18,7 +18,6 @@ limitations under the License.
package v1beta1
import (
- "context"
time "time"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go b/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go
index 1432ad978a..e06d22481f 100644
--- a/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go
+++ b/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go
@@ -18,7 +18,6 @@ limitations under the License.
package v1beta1
import (
- "context"
time "time"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go b/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go
index 2da1f3feab..12c6e2f4b8 100644
--- a/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go
+++ b/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go
@@ -18,7 +18,6 @@ limitations under the License.
package v1beta1
import (
- "context"
time "time"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/nodepool.go b/client/informers/externalversions/hypershift/v1beta1/nodepool.go
index 9572114de8..262275882f 100644
--- a/client/informers/externalversions/hypershift/v1beta1/nodepool.go
+++ b/client/informers/externalversions/hypershift/v1beta1/nodepool.go
@@ -18,7 +18,6 @@ limitations under the License.
package v1beta1
import (
- "context"
time "time"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go b/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go
index da86755963..1a3b5d59b6 100644
--- a/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go
+++ b/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go
@@ -18,7 +18,6 @@ limitations under the License.
package v1alpha1
import (
- "context"
time "time"
schedulingv1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
diff --git a/go.mod b/go.mod
index 0e75e1b20f..41d3c5c2fe 100644
--- a/go.mod
+++ b/go.mod
@@ -35,7 +35,7 @@ require (
github.com/go-jose/go-jose/v3 v3.0.3
github.com/go-logr/logr v1.4.2
github.com/go-logr/zapr v1.3.0
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
+ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8
github.com/google/cel-go v0.22.0
github.com/google/go-cmp v0.6.0
github.com/google/gofuzz v1.2.0
@@ -46,9 +46,9 @@ require (
github.com/onsi/gomega v1.36.2
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
- github.com/openshift/api v0.0.0-20250124212313-a770960d61e0
- github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7
- github.com/openshift/cloud-credential-operator v0.0.0-20240814063301-26d835e0fa5e
+ github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f
+ github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a
+ github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4
github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f
github.com/openshift/cluster-node-tuning-operator v0.0.0-20250123060244-3b77fb55b835
github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87
@@ -91,7 +91,7 @@ require (
k8s.io/kube-scheduler v0.32.2
k8s.io/kubectl v0.32.2
k8s.io/pod-security-admission v0.32.2
- k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
+ k8s.io/utils v0.0.0-20241210054802-24370beab758
kubevirt.io/api v1.4.0
kubevirt.io/containerized-data-importer-api v1.61.1
sigs.k8s.io/cluster-api v1.9.4
diff --git a/go.sum b/go.sum
index 4af5613518..3e38e73f57 100644
--- a/go.sum
+++ b/go.sum
@@ -295,8 +295,8 @@ github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
@@ -495,12 +495,12 @@ github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2
github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0 h1:dCvNfygMrPLVNQ06bpHXrxKfrXHiprO4+etHrRUqI8g=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
-github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7 h1:4iliLcvr1P9EUMZgIaSNEKNQQzBn+L6PSequlFOuB6Q=
-github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7/go.mod h1:2tcufBE4Cu6RNgDCxcUJepa530kGo5GFVfR9BSnndhI=
-github.com/openshift/cloud-credential-operator v0.0.0-20240814063301-26d835e0fa5e h1:X0et0+WqSmnW0CS//XdLFLvJvzt8TFJzfFE1XJ2rXNg=
-github.com/openshift/cloud-credential-operator v0.0.0-20240814063301-26d835e0fa5e/go.mod h1:4AWWBNPuWzPtT77xDONlObrazPlBCKXd+16lupnIrQc=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f h1:HH+BqfxONWB2kknhCLAjiAievsyb56WvLGXdNOseI8g=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
+github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a h1:duO3JMrUOqVx50QhzxvDeOYIwTNOB8/EEuRLPyvAMBg=
+github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a/go.mod h1:Qw3ThpzVZ0bfTILpBNYg4LGyjtNxfyCiGh/uDLOOTP8=
+github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4 h1:nrD3npDGt5bvwNXZKTzzEuZTI/4Uo5PbrkpAjfxhxtE=
+github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4/go.mod h1:Lzu29TMne5LsgPnyw2n9jrPiD5t6uyG5aE6KFy8cz6w=
github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f h1:YWIaRFHMNzxi26FbT0TE9zl4pBzfeiZjQJv0JUzibUc=
github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f/go.mod h1:RNzrOK4TA6LdyrgrEvb1TXjOnhRC+iBmnBoAa/4ivBw=
github.com/openshift/cluster-node-tuning-operator v0.0.0-20250123060244-3b77fb55b835 h1:AN2ZaBNt5YrJmO7QbE4ULW4UD+8RnJRtpLuchcLVAno=
@@ -955,8 +955,8 @@ k8s.io/pod-security-admission v0.32.2 h1:zDfAb/t0LbNU3z0ZMHtCb1zp8x05gWCGhmBYpUp
k8s.io/pod-security-admission v0.32.2/go.mod h1:yxMPB3i1pGMLfxbe4BiWMuowMD7cdHR32y4nCj4wH+s=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd h1:KJXBX9dOmRTUWduHg1gnWtPGIEl+GMh8UHdrBEZgOXE=
knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd/go.mod h1:36cYnaOVHkzmhgybmYX6zDaTl3PakFeJQJl7wi6/RLE=
kubevirt.io/api v1.4.0 h1:dDLyQLSp9obzsDrv3cyL1olIc/66IWVaGiR3gfPfgT0=
diff --git a/hack/tools/go.mod b/hack/tools/go.mod
index bf5e8adf03..8ee6c5791c 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -4,56 +4,63 @@ go 1.23.0
require (
github.com/ahmetb/gen-crd-api-reference-docs v0.3.0
- github.com/golangci/golangci-lint v1.62.0
- github.com/openshift/api/tools v0.0.0-20241014171745-f199b1f2660c
+ github.com/golangci/golangci-lint v1.63.4
+ github.com/openshift/api/tools v0.0.0-20250213010142-f5b09d13c01f
gotest.tools/gotestsum v1.12.0
honnef.co/go/tools v0.5.1
- k8s.io/code-generator v0.31.1
- k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+ k8s.io/code-generator v0.32.1
+ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
)
require (
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
4d63.com/gochecknoglobals v0.2.1 // indirect
+ cel.dev/expr v0.18.0 // indirect
dario.cat/mergo v1.0.0 // indirect
- github.com/4meepo/tagalign v1.3.4 // indirect
+ github.com/4meepo/tagalign v1.4.1 // indirect
github.com/Abirdcfly/dupword v0.1.3 // indirect
github.com/Antonboom/errname v1.0.0 // indirect
- github.com/Antonboom/nilnil v1.0.0 // indirect
- github.com/Antonboom/testifylint v1.5.0 // indirect
+ github.com/Antonboom/nilnil v1.0.1 // indirect
+ github.com/Antonboom/testifylint v1.5.2 // indirect
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
- github.com/Crocmagnon/fatcontext v0.5.2 // indirect
+ github.com/Crocmagnon/fatcontext v0.5.3 // indirect
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect
+ github.com/JoelSpeed/kal v0.0.0-20250121143106-304c215e474e // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
github.com/ProtonMail/go-crypto v1.1.3 // indirect
github.com/a8m/envsubst v1.4.2 // indirect
- github.com/alecthomas/go-check-sumtype v0.2.0 // indirect
+ github.com/alecthomas/go-check-sumtype v0.3.1 // indirect
github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/alexkohler/nakedret/v2 v2.0.5 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
+ github.com/alingse/nilnesserr v0.1.1 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/ashanbrown/forbidigo v1.6.0 // indirect
- github.com/ashanbrown/makezero v1.1.1 // indirect
+ github.com/ashanbrown/makezero v1.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bitfield/gotestdox v0.2.2 // indirect
github.com/bkielbasa/cyclop v1.2.3 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
github.com/blizzy78/varnamelen v0.8.0 // indirect
- github.com/bombsimon/wsl/v4 v4.4.1 // indirect
+ github.com/bombsimon/wsl/v4 v4.5.0 // indirect
github.com/breml/bidichk v0.3.2 // indirect
github.com/breml/errchkjson v0.4.0 // indirect
- github.com/butuzov/ireturn v0.3.0 // indirect
- github.com/butuzov/mirror v1.2.0 // indirect
+ github.com/butuzov/ireturn v0.3.1 // indirect
+ github.com/butuzov/mirror v1.3.0 // indirect
github.com/catenacyber/perfsprint v0.7.1 // indirect
github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charithe/durationcheck v0.0.10 // indirect
github.com/chavacava/garif v0.1.0 // indirect
- github.com/ckaznocha/intrange v0.2.1 // indirect
+ github.com/ckaznocha/intrange v0.3.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
- github.com/curioswitch/go-reassign v0.2.0 // indirect
+ github.com/curioswitch/go-reassign v0.3.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.5 // indirect
github.com/daixiang0/gci v0.13.5 // indirect
github.com/dave/dst v0.27.3 // indirect
@@ -67,6 +74,7 @@ require (
github.com/ettle/strcase v0.2.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/firefart/nonamedreturns v1.0.5 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
@@ -78,9 +86,10 @@ require (
github.com/go-git/go-billy/v5 v5.6.0 // indirect
github.com/go-git/go-git/v5 v5.13.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
- github.com/go-openapi/jsonpointer v0.20.0 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.4 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
github.com/go-toolsmith/astequal v1.2.0 // indirect
@@ -89,45 +98,49 @@ require (
github.com/go-toolsmith/strparse v1.1.0 // indirect
github.com/go-toolsmith/typep v1.1.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
- github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
+ github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect
github.com/gobuffalo/flect v1.0.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.10.3 // indirect
- github.com/goccy/go-yaml v1.11.3 // indirect
+ github.com/goccy/go-yaml v1.13.0 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
github.com/golangci/go-printf-func-name v0.1.0 // indirect
- github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect
+ github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 // indirect
github.com/golangci/misspell v0.6.0 // indirect
- github.com/golangci/modinfo v0.3.4 // indirect
github.com/golangci/plugin-module-register v0.1.1 // indirect
github.com/golangci/revgrep v0.5.3 // indirect
github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect
+ github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/google/uuid v1.6.0 // indirect
github.com/gordonklaus/ineffassign v0.1.0 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.4.2 // indirect
github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+ github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
- github.com/jessevdk/go-flags v1.5.0 // indirect
+ github.com/jessevdk/go-flags v1.6.1 // indirect
github.com/jgautheron/goconst v1.7.1 // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
- github.com/jjti/go-spancheck v0.6.2 // indirect
+ github.com/jjti/go-spancheck v0.6.4 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/julz/importas v0.1.0 // indirect
+ github.com/julz/importas v0.2.0 // indirect
github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/kisielk/errcheck v1.8.0 // indirect
@@ -137,8 +150,11 @@ require (
github.com/kunwardeep/paralleltest v1.0.10 // indirect
github.com/kyoh86/exportloopref v0.1.11 // indirect
github.com/lasiar/canonicalheader v1.1.2 // indirect
- github.com/ldez/gomoddirectives v0.2.4 // indirect
- github.com/ldez/tagliatelle v0.5.0 // indirect
+ github.com/ldez/exptostd v0.3.1 // indirect
+ github.com/ldez/gomoddirectives v0.6.0 // indirect
+ github.com/ldez/grignotin v0.7.0 // indirect
+ github.com/ldez/tagliatelle v0.7.1 // indirect
+ github.com/ldez/usetesting v0.4.2 // indirect
github.com/leonklingele/grouper v1.1.2 // indirect
github.com/macabu/inamedparam v0.1.3 // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -149,8 +165,8 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
- github.com/mgechev/revive v1.5.0 // indirect
- github.com/mikefarah/yq/v4 v4.44.2 // indirect
+ github.com/mgechev/revive v1.5.1 // indirect
+ github.com/mikefarah/yq/v4 v4.44.5 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -160,15 +176,15 @@ require (
github.com/nakabonne/nestif v0.3.1 // indirect
github.com/nishanths/exhaustive v0.12.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
- github.com/nunnatsa/ginkgolinter v0.18.0 // indirect
+ github.com/nunnatsa/ginkgolinter v0.18.4 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
- github.com/openshift/crd-schema-checker v0.0.0-20240927092539-11d809bb1183 // indirect
+ github.com/openshift/crd-schema-checker v0.0.0-20241113192003-573763d3107a // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/polyfloyd/go-errorlint v1.6.0 // indirect
+ github.com/polyfloyd/go-errorlint v1.7.0 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
@@ -178,17 +194,17 @@ require (
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
- github.com/raeperd/recvcheck v0.1.2 // indirect
+ github.com/raeperd/recvcheck v0.2.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/ryancurrah/gomodguard v1.3.5 // indirect
github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
- github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
- github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
+ github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect
+ github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
- github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect
+ github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect
github.com/securego/gosec/v2 v2.21.4 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
@@ -206,20 +222,21 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.12.0 // indirect
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
- github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
+ github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect
+ github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
- github.com/tdakkota/asciicheck v0.2.0 // indirect
- github.com/tetafro/godot v1.4.18 // indirect
- github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
+ github.com/tdakkota/asciicheck v0.3.0 // indirect
+ github.com/tetafro/godot v1.4.20 // indirect
+ github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect
github.com/timonwong/loggercheck v0.10.1 // indirect
- github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect
+ github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
- github.com/ultraware/funlen v0.1.0 // indirect
- github.com/ultraware/whitespace v0.1.1 // indirect
- github.com/uudashr/gocognit v1.1.3 // indirect
- github.com/uudashr/iface v1.2.0 // indirect
+ github.com/ultraware/funlen v0.2.0 // indirect
+ github.com/ultraware/whitespace v0.2.0 // indirect
+ github.com/uudashr/gocognit v1.2.0 // indirect
+ github.com/uudashr/iface v1.3.0 // indirect
github.com/vmware-archive/yaml-patch v0.0.11 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
@@ -231,40 +248,56 @@ require (
gitlab.com/bosi/decorder v0.4.2 // indirect
go-simpler.org/musttag v0.13.0 // indirect
go-simpler.org/sloglint v0.7.2 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+ go.opentelemetry.io/otel v1.29.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
+ go.opentelemetry.io/otel/metric v1.29.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+ go.opentelemetry.io/otel/trace v1.29.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.26.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
- golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.33.0 // indirect
+ golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
- golang.org/x/tools v0.27.0 // indirect
- golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ golang.org/x/time v0.7.0 // indirect
+ golang.org/x/tools v0.28.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect
+ google.golang.org/grpc v1.67.0 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/api v0.31.1 // indirect
- k8s.io/apiextensions-apiserver v0.31.1 // indirect
- k8s.io/apimachinery v0.31.1 // indirect
+ k8s.io/api v0.32.1 // indirect
+ k8s.io/apiextensions-apiserver v0.32.1 // indirect
+ k8s.io/apimachinery v0.32.1 // indirect
+ k8s.io/apiserver v0.32.1 // indirect
+ k8s.io/client-go v0.32.1 // indirect
+ k8s.io/component-base v0.32.1 // indirect
k8s.io/gengo v0.0.0-20240404160639-a0386bf69313 // indirect
- k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
+ k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
k8s.io/klog v1.0.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
mvdan.cc/gofumpt v0.7.0 // indirect
mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
sigs.k8s.io/controller-tools v0.15.0 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
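hack/tools is a separate module whose only job is to pin build-time tooling, which is why this bump pairs golangci-lint v1.63.4 and code-generator v0.32.1 with a long tail of // indirect entries (the new kal linter, cel-go, and the OpenTelemetry stack all arrive transitively). Such a module is normally driven by a build-tagged tools.go of blank imports; a minimal sketch of that convention (the import list is illustrative and may not match HyperShift's actual file):

```go
//go:build tools

// Package tools blank-imports the tool binaries this module pins so that
// `go mod tidy` keeps them, and their transitive dependencies, in go.mod.
package tools

import (
	_ "github.com/ahmetb/gen-crd-api-reference-docs"
	_ "github.com/golangci/golangci-lint/cmd/golangci-lint"
	_ "gotest.tools/gotestsum"
	_ "k8s.io/code-generator"
)
```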
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index 0050e38f41..d15bc6887d 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -2,26 +2,30 @@
4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc=
4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
+cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
+cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
-github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8=
-github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0=
+github.com/4meepo/tagalign v1.4.1 h1:GYTu2FaPGOGb/xJalcqHeD4il5BiCywyEYZOA55P6J4=
+github.com/4meepo/tagalign v1.4.1/go.mod h1:2H9Yu6sZ67hmuraFgfZkNcg5Py9Ch/Om9l2K/2W1qS4=
github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE=
github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw=
github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA=
github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI=
-github.com/Antonboom/nilnil v1.0.0 h1:n+v+B12dsE5tbAqRODXmEKfZv9j2KcTBrp+LkoM4HZk=
-github.com/Antonboom/nilnil v1.0.0/go.mod h1:fDJ1FSFoLN6yoG65ANb1WihItf6qt9PJVTn/s2IrcII=
-github.com/Antonboom/testifylint v1.5.0 h1:dlUIsDMtCrZWUnvkaCz3quJCoIjaGi41GzjPBGkkJ8A=
-github.com/Antonboom/testifylint v1.5.0/go.mod h1:wqaJbu0Blb5Wag2wv7Z5xt+CIV+eVLxtGZrlK13z3AE=
+github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs=
+github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0=
+github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk=
+github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
-github.com/Crocmagnon/fatcontext v0.5.2 h1:vhSEg8Gqng8awhPju2w7MKHqMlg4/NI+gSDHtR3xgwA=
-github.com/Crocmagnon/fatcontext v0.5.2/go.mod h1:87XhRMaInHP44Q7Tlc7jkgKKB7kZAOPiDkFMdKCC+74=
+github.com/Crocmagnon/fatcontext v0.5.3 h1:zCh/wjc9oyeF+Gmp+V60wetm8ph2tlsxocgg/J0hOps=
+github.com/Crocmagnon/fatcontext v0.5.3/go.mod h1:XoCQYY1J+XTfyv74qLXvNw4xFunr3L1wkopIIKG7wGM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU=
github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao=
+github.com/JoelSpeed/kal v0.0.0-20250121143106-304c215e474e h1:H7jNmU1/bzAi4YFVpRPUzE3hzcrOCk1H6qJpLVCAPaI=
+github.com/JoelSpeed/kal v0.0.0-20250121143106-304c215e474e/go.mod h1:zptAqBcJGHljQfJR7oQXVKS/E6KVUFZDkE7OJDQeKOw=
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
@@ -35,10 +39,10 @@ github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg=
github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 h1:+XfOU14S4bGuwyvCijJwhhBIjYN+YXS18jrCY2EzJaY=
github.com/ahmetb/gen-crd-api-reference-docs v0.3.0/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8=
-github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
-github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
-github.com/alecthomas/go-check-sumtype v0.2.0 h1:Bo+e4DFf3rs7ME9w/0SU/g6nmzJaphduP8Cjiz0gbwY=
-github.com/alecthomas/go-check-sumtype v0.2.0/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
+github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
+github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU=
+github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E=
github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8=
github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c=
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
@@ -49,50 +53,64 @@ github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pO
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
+github.com/alingse/nilnesserr v0.1.1 h1:7cYuJewpy9jFNMEA72Q1+3Nm3zKHzg+Q28D5f2bBFUA=
+github.com/alingse/nilnesserr v0.1.1/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY=
github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
-github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
-github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
+github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU=
+github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitfield/gotestdox v0.2.2 h1:x6RcPAbBbErKLnapz1QeAlf3ospg8efBsedU93CDsnE=
github.com/bitfield/gotestdox v0.2.2/go.mod h1:D+gwtS0urjBrzguAkTM2wodsTQYFHdpx8eqRJ3N+9pY=
github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w=
github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
-github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw=
-github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo=
+github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A=
+github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc=
github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs=
github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos=
github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk=
github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8=
-github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0=
-github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA=
-github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs=
-github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ=
+github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY=
+github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M=
+github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc=
+github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI=
github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc=
github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg=
github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
-github.com/ckaznocha/intrange v0.2.1 h1:M07spnNEQoALOJhwrImSrJLaxwuiQK+hA2DeajBlwYk=
-github.com/ckaznocha/intrange v0.2.1/go.mod h1:7NEhVyf8fzZO5Ds7CRaqPEm52Ut83hsTiL5zbER/HYk=
+github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY=
+github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
-github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
+github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs=
+github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88=
github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c=
@@ -109,6 +127,8 @@ github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42
github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
github.com/elazarl/goproxy v1.2.1 h1:njjgvO6cRG9rIqN2ebkqy6cQz2Njkx7Fsfv/zIZqgug=
@@ -127,6 +147,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA=
github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
@@ -137,6 +159,8 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
+github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
+github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghostiam/protogetter v0.3.8 h1:LYcXbYvybUyTIxN2Mj9h6rHrDZBDwZloPoKctWrFyJY=
@@ -155,22 +179,25 @@ github.com/go-git/go-git/v5 v5.13.0 h1:vLn5wlGIh/X78El6r3Jr+30W16Blk0CTcxTYcYPWi
github.com/go-git/go-git/v5 v5.13.0/go.mod h1:Wjo7/JyVKtQgUNdXYXIepzWfJQkUEIGvkvVkiXRR/zw=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
-github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
-github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA=
+github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@@ -196,16 +223,16 @@ github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUN
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
-github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
-github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY=
+github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
-github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I=
-github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
+github.com/goccy/go-yaml v1.13.0 h1:0Wtp0FZLd7Sm8gERmR9S6Iczzb3vItJj7NaHmFg8pTs=
+github.com/goccy/go-yaml v1.13.0/go.mod h1:IjYwxUiJDoqpx2RmbdjMUceGHZwYLon3sfOGl5Hi9lc=
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -218,20 +245,22 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU=
github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s=
-github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME=
-github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE=
-github.com/golangci/golangci-lint v1.62.0 h1:/G0g+bi1BhmGJqLdNQkKBWjcim8HjOPc4tsKuHDOhcI=
-github.com/golangci/golangci-lint v1.62.0/go.mod h1:jtoOhQcKTz8B6dGNFyfQV3WZkQk+YvBDewDtNpiAJts=
+github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 h1:t5wybL6RtO83VwoMOb7U/Peqe3gGKQlPIC66wXmnkvM=
+github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9/go.mod h1:Ag3L7sh7E28qAp/5xnpMMTuGYqxLZoSaEHZDkZB1RgU=
+github.com/golangci/golangci-lint v1.63.4 h1:bJQFQ3hSfUto597dkL7ipDzOxsGEpiWdLiZ359OWOBI=
+github.com/golangci/golangci-lint v1.63.4/go.mod h1:Hx0B7Lg5/NXbaOHem8+KU+ZUIzMI6zNj/7tFwdnn10I=
github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs=
github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo=
-github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA=
-github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM=
github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c=
github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc=
github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs=
github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k=
github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
+github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -247,10 +276,12 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
-github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s=
github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
@@ -263,11 +294,21 @@ github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3
github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
-github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
-github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
+github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8=
+github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo=
+github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
@@ -276,22 +317,22 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
+github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk=
github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
-github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk=
-github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA=
+github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc=
+github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
-github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ=
+github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY=
github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos=
github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
@@ -321,12 +362,18 @@ github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4=
github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI=
-github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg=
-github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g=
-github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
-github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
-github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/ldez/exptostd v0.3.1 h1:90yWWoAKMFHeovTK8uzBms9Ppp8Du/xQ20DRO26Ymrw=
+github.com/ldez/exptostd v0.3.1/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ=
+github.com/ldez/gomoddirectives v0.6.0 h1:Jyf1ZdTeiIB4dd+2n4qw+g4aI9IJ6JyfOZ8BityWvnA=
+github.com/ldez/gomoddirectives v0.6.0/go.mod h1:TuwOGYoPAoENDWQpe8DMqEm5nIfjrxZXmxX/CExWyZ4=
+github.com/ldez/grignotin v0.7.0 h1:vh0dI32WhHaq6LLPZ38g7WxXuZ1+RzyrJ7iPG9JMa8c=
+github.com/ldez/grignotin v0.7.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk=
+github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk=
+github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I=
+github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA=
+github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
@@ -353,10 +400,10 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mgechev/revive v1.5.0 h1:oaSmjA7rP8+HyoRuCgC531VHwnLH1AlJdjj+1AnQceQ=
-github.com/mgechev/revive v1.5.0/go.mod h1:L6T3H8EoerRO86c7WuGpvohIUmiploGiyoYbtIWFmV8=
-github.com/mikefarah/yq/v4 v4.44.2 h1:J+ezWCDTg+SUs0jXdcE0HIPH1+rEr0Tbn9Y1SwiWtH0=
-github.com/mikefarah/yq/v4 v4.44.2/go.mod h1:9bnz36uZJDEyxdIjRronBcqStS953k3y3DrSRXr4F/w=
+github.com/mgechev/revive v1.5.1 h1:hE+QPeq0/wIzJwOphdVyUJ82njdd8Khp4fUIHGZHW3M=
+github.com/mgechev/revive v1.5.1/go.mod h1:lC9AhkJIBs5zwx8wkudyHrU+IJkrEKmpCmGMnIJPk4o=
+github.com/mikefarah/yq/v4 v4.44.5 h1:/Xm1dM1BfyDJMg+yIpnl2AgpmLFQg3Lcm/kuyYgHEXE=
+github.com/mikefarah/yq/v4 v4.44.5/go.mod h1:rpn3xGVz+2pDuLJTlCvzatCwTmmUeHcm7MbkbtHdvkc=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -376,24 +423,24 @@ github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhK
github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.18.0 h1:ZXO1wKhPg3A6LpbN5dMuqwhfOjN5c3ous8YdKOuqk9k=
-github.com/nunnatsa/ginkgolinter v0.18.0/go.mod h1:vPrWafSULmjMGCMsfGA908if95VnHQNAahvSBOjTuWs=
+github.com/nunnatsa/ginkgolinter v0.18.4 h1:zmX4KUR+6fk/vhUFt8DOP6KwznekhkmVSzzVJve2vyM=
+github.com/nunnatsa/ginkgolinter v0.18.4/go.mod h1:AMEane4QQ6JwFz5GgjI5xLUM9S/CylO+UyM97fN2iBI=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
-github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
-github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
-github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
-github.com/openshift/api/tools v0.0.0-20241014171745-f199b1f2660c h1:Yqb9U5t5VW0nY2lfhxJ3dkpcOipAdCpQKfnplg5y8PI=
-github.com/openshift/api/tools v0.0.0-20241014171745-f199b1f2660c/go.mod h1:AOqVox+u6bxjXoSTHI421YuBBY5YIkhI/2AcdbRpPxU=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/api/tools v0.0.0-20250213010142-f5b09d13c01f h1:6+KhkIK8YWxh4n+qD5aypR43Vozm4QWfNwXI5yWJzTw=
+github.com/openshift/api/tools v0.0.0-20250213010142-f5b09d13c01f/go.mod h1:NDQL+y3cc/O4/qgC8nEo9+RPDx47NPqI6SiVKLydzBQ=
github.com/openshift/controller-tools v0.0.0-20240809124726-b901265f16d5 h1:ImjAJqnPX8ILYw1iaZkToByrUqBHhCmYw+Bk1uEVKRo=
github.com/openshift/controller-tools v0.0.0-20240809124726-b901265f16d5/go.mod h1:80xsUppuf2iNgiThH2bzDIN5204p5E93z+YtNnAJlHA=
-github.com/openshift/crd-schema-checker v0.0.0-20240927092539-11d809bb1183 h1:5QvKMDlTFpY9+Md+qYv1vVXT0Rt/VhAqtXeoauyzNxc=
-github.com/openshift/crd-schema-checker v0.0.0-20240927092539-11d809bb1183/go.mod h1:sTxJ4ZFW9r9fEdbW2v0yMRi6NcyTbx0fII4p83IQ+L8=
+github.com/openshift/crd-schema-checker v0.0.0-20241113192003-573763d3107a h1:gBheMF1vVwxfnuGHJ82f/nmUATdtewq60/yhbBqD4+M=
+github.com/openshift/crd-schema-checker v0.0.0-20241113192003-573763d3107a/go.mod h1:sTxJ4ZFW9r9fEdbW2v0yMRi6NcyTbx0fII4p83IQ+L8=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
@@ -415,8 +462,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v1.6.0 h1:tftWV9DE7txiFzPpztTAwyoRLKNj9gpVm2cg8/OwcYY=
-github.com/polyfloyd/go-errorlint v1.6.0/go.mod h1:HR7u8wuP1kb1NeN1zqTd1ZMlqUKPPHF+Id4vIPvDqVw=
+github.com/polyfloyd/go-errorlint v1.7.0 h1:Zp6lzCK4hpBDj8y8a237YK4EPrMXQWvOe3nGoH4pFrU=
+github.com/polyfloyd/go-errorlint v1.7.0/go.mod h1:dGWKu85mGHnegQ2SWpEybFityCg3j7ZbwsVUxAOk9gY=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
@@ -437,8 +484,8 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
-github.com/raeperd/recvcheck v0.1.2 h1:SjdquRsRXJc26eSonWIo8b7IMtKD3OAT2Lb5G3ZX1+4=
-github.com/raeperd/recvcheck v0.1.2/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU=
+github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI=
+github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -454,14 +501,14 @@ github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV
github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE=
github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
-github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
-github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
-github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
-github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
+github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0=
+github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
-github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI=
-github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8=
+github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ=
+github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8=
github.com/securego/gosec/v2 v2.21.4 h1:Le8MSj0PDmOnHJgUATjD96PaXRvCpKC+DGJvwyy0Mlk=
github.com/securego/gosec/v2 v2.21.4/go.mod h1:Jtb/MwRQfRxCXyCm1rfM1BEiiiTfUOdyzzAhlr6lUTA=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
@@ -499,8 +546,10 @@ github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
-github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
-github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
+github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4=
+github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -519,30 +568,30 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM=
-github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
+github.com/tdakkota/asciicheck v0.3.0 h1:LqDGgZdholxZMaJgpM6b0U9CFIjDCbFdUF00bDnBKOQ=
+github.com/tdakkota/asciicheck v0.3.0/go.mod h1:KoJKXuX/Z/lt6XzLo8WMBfQGzak0SrAKZlvRr4tg8Ac=
github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.18 h1:ouX3XGiziKDypbpXqShBfnNLTSjR8r3/HVzrtJ+bHlI=
-github.com/tetafro/godot v1.4.18/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
-github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
-github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
+github.com/tetafro/godot v1.4.20 h1:z/p8Ek55UdNvzt4TFn2zx2KscpW4rWqcnUrdmvWJj7E=
+github.com/tetafro/godot v1.4.20/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
+github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg=
+github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460=
github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg=
github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8=
-github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4=
-github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo=
+github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg=
+github.com/tomarrell/wrapcheck/v2 v2.10.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
-github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
-github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ=
-github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8=
-github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM=
-github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U=
-github.com/uudashr/iface v1.2.0 h1:ECJjh5q/1Zmnv/2yFpWV6H3oMg5+Mo+vL0aqw9Gjazo=
-github.com/uudashr/iface v1.2.0/go.mod h1:Ux/7d/rAF3owK4m53cTVXL4YoVHKNqnoOeQHn2xrlp0=
+github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI=
+github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA=
+github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g=
+github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8=
+github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA=
+github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU=
+github.com/uudashr/iface v1.3.0 h1:zwPch0fs9tdh9BmL5kcgSpvnObV+yHjO4JjVBl8IA10=
+github.com/uudashr/iface v1.3.0/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg=
github.com/vmware-archive/yaml-patch v0.0.11 h1:9o4FGgkpLD88A5O7BOOXs7UBeeymRT9atLsKmHJ2wWs=
github.com/vmware-archive/yaml-patch v0.0.11/go.mod h1:mHWEn1O1CU3yBnN6iPFeAwAqzUibF2X+9EltQ28w+Vs=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -574,14 +623,38 @@ go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE=
go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM=
go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY=
go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo=
+go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
+go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
+go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
+go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
+go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
+go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
+go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
+go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
+go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
+go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
-go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -590,6 +663,7 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
@@ -598,13 +672,12 @@ golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWB
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 h1:bVwtbF629Xlyxk6xLQq2TDYmqP0uiWaet5LwRebuY0k=
-golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f h1:WTyX8eCCyfdqiPYkRGm0MqElSfYFH3yR1+rl/mct9sA=
+golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
@@ -612,6 +685,7 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
@@ -631,13 +705,17 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -646,6 +724,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
@@ -655,9 +734,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -676,6 +753,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -688,9 +766,11 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
@@ -703,12 +783,15 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -721,35 +804,41 @@ golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
-golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o=
-golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q=
+golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
+golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
+google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
@@ -773,37 +862,45 @@ gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
-k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
-k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
-k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40=
-k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ=
-k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
-k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
+k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
+k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw=
+k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto=
+k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
+k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak=
+k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw=
+k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
+k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/code-generator v0.30.1 h1:ZsG++q5Vt0ScmKCeLhynUuWgcwFGg1Hl1AGfatqPJBI=
k8s.io/code-generator v0.30.1/go.mod h1:hFgxRsvOUg79mbpbVKfjJvRhVz1qLoe40yZDJ/hwRH4=
+k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk=
+k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w=
k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20240404160639-a0386bf69313 h1:wBIDZID8ju9pwOiLlV22YYKjFGtiNSWgHf5CnKLRUuM=
k8s.io/gengo v0.0.0-20240404160639-a0386bf69313/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
-k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
+k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
+k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU=
mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo=
mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U=
mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/hack/tools/vendor/cel.dev/expr/.bazelversion b/hack/tools/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 0000000000..26bc914a3b
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.0.1
+# Keep this pinned version in parity with cel-go
diff --git a/hack/tools/vendor/cel.dev/expr/.gitattributes b/hack/tools/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 0000000000..3de1ec213a
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/hack/tools/vendor/cel.dev/expr/.gitignore b/hack/tools/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 0000000000..0d4fed27c9
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/hack/tools/vendor/cel.dev/expr/BUILD.bazel b/hack/tools/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 0000000000..37d8adc950
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/hack/tools/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/hack/tools/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..59908e2d8e
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/hack/tools/vendor/cel.dev/expr/CONTRIBUTING.md b/hack/tools/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 0000000000..8f5fd5c31f
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/hack/tools/vendor/cel.dev/expr/GOVERNANCE.md b/hack/tools/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 0000000000..0a525bc17d
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+ serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer gives the design document approval, the document will be
+ linked to the FR in the CEL-Spec repo and opened for comments to members of
+ the cel-lang-discuss@googlegroups.com mailing list.
+- The Language Council will review the design doc at the next council meeting
+ (once every three weeks), and the council decision will be included in the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/hack/tools/vendor/cel.dev/expr/LICENSE b/hack/tools/vendor/cel.dev/expr/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/hack/tools/vendor/cel.dev/expr/MAINTAINERS.md b/hack/tools/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 0000000000..1ed2eb8ab3
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name | Company | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller | Facebook | cel-cpp, cel-spec |
+| Jim Larson | Google | cel-go, cel-spec |
+| Matthias Blume   | Google       | cel-spec          |
+| Tristan Swadell | Google | cel-go, cel-spec |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/hack/tools/vendor/cel.dev/expr/MODULE.bazel b/hack/tools/vendor/cel.dev/expr/MODULE.bazel
new file mode 100644
index 0000000000..9794266f56
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/MODULE.bazel
@@ -0,0 +1,70 @@
+module(
+ name = "cel-spec",
+)
+
+bazel_dep(
+ name = "bazel_skylib",
+ version = "1.7.1",
+)
+bazel_dep(
+ name = "gazelle",
+ version = "0.36.0",
+ repo_name = "bazel_gazelle",
+)
+bazel_dep(
+ name = "googleapis",
+ version = "0.0.0-20240819-fe8ba054a",
+ repo_name = "com_google_googleapis",
+)
+bazel_dep(
+ name = "protobuf",
+ version = "26.0",
+ repo_name = "com_google_protobuf",
+)
+bazel_dep(
+ name = "rules_cc",
+ version = "0.0.9",
+)
+bazel_dep(
+ name = "rules_go",
+ version = "0.49.0",
+ repo_name = "io_bazel_rules_go",
+)
+bazel_dep(
+ name = "rules_java",
+ version = "7.6.5",
+)
+bazel_dep(
+ name = "rules_proto",
+ version = "6.0.0",
+)
+bazel_dep(
+ name = "rules_python",
+ version = "0.35.0",
+)
+
+### PYTHON ###
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+python.toolchain(
+ ignore_root_user_error = True,
+ python_version = "3.11",
+)
+
+switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules")
+switched_rules.use_languages(
+ cc = True,
+ go = True,
+ java = True,
+)
+use_repo(switched_rules, "com_google_googleapis_imports")
+
+go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.21.1")
+
+go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+use_repo(
+ go_deps,
+ "org_golang_google_genproto_googleapis_rpc",
+ "org_golang_google_protobuf",
+)
diff --git a/hack/tools/vendor/cel.dev/expr/README.md b/hack/tools/vendor/cel.dev/expr/README.md
new file mode 100644
index 0000000000..7930c0b755
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/README.md
@@ -0,0 +1,73 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+  tooling to reason about the system as a whole.
+* Protocols: expressions are a useful data type and require interoperability
+ across programming languages and platforms.
+
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+ * CEL evaluates in linear time, is mutation-free, and is not Turing-complete.
+   This limitation is a feature of the language design, which allows the
+   implementation to evaluate orders of magnitude faster than equivalently
+   sandboxed JavaScript.
+2. Make it extensible.
+ * CEL is designed to be embedded in applications; its context makes it
+   extensible, allowing the embedding software to provide functions and data.
+3. Developer-friendly.
+ * The language is approachable to developers. The initial spec was based
+ on the experience of developing Firebase Rules and usability testing
+ many prior iterations.
+ * The library itself and accompanying tooling should be easy to adopt for
+   teams that seek to integrate CEL into their platforms.
+
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. Its
+  syntax is similar to expressions in C/C++/Java/JavaScript.
+* A representation of the program's abstract syntax tree (AST).
+* A compiler library that converts the textual representation to the binary
+ representation. This can be done ahead of time (in the control plane) or
+ just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+  Most use cases will use `attribute_context.proto`.
+* An evaluator library that takes the binary representation and the context
+  and produces a result, usually a Boolean.
+
+For use cases which require persistence or cross-process communication, it is
+highly recommended to serialize the type-checked expression as a protocol
+buffer. The CEL team maintains canonical protocol buffers for ASTs and will
+keep these versions identical and wire-compatible in perpetuity:
+
+* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
+* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
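+
+As a minimal sketch (assuming a client package that imports this module as
+`cel.dev/expr` together with `google.golang.org/protobuf/proto`), a
+type-checked AST round-trips through the `CheckedExpr` message like so:
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"cel.dev/expr"
+	"google.golang.org/protobuf/proto"
+)
+
+func main() {
+	// A minimal type-checked envelope; real ASTs come from a type checker.
+	checked := &expr.CheckedExpr{ExprVersion: "v1"}
+
+	// Serialize for persistence or cross-process communication.
+	data, err := proto.Marshal(checked)
+	if err != nil {
+		panic(err)
+	}
+
+	// Deserialize on the receiving side.
+	decoded := &expr.CheckedExpr{}
+	if err := proto.Unmarshal(data, decoded); err != nil {
+		panic(err)
+	}
+	fmt.Println(decoded.GetExprVersion()) // "v1"
+}
+```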
+
+
+Example of boolean conditions and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+ || (account.overdraftProtection
+ && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
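+
+A minimal evaluation sketch for the condition above, assuming the separate
+cel-go library (github.com/google/cel-go, not vendored here) as the compiler
+and evaluator:
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+)
+
+func main() {
+	// Declare the typed variables the expression may reference.
+	env, err := cel.NewEnv(
+		cel.Variable("account", cel.MapType(cel.StringType, cel.DynType)),
+		cel.Variable("transaction", cel.MapType(cel.StringType, cel.DynType)),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	// Compile parses and type-checks the textual form into an AST.
+	ast, iss := env.Compile("account.balance >= transaction.withdrawal")
+	if iss != nil && iss.Err() != nil {
+		panic(iss.Err())
+	}
+
+	// Program plans the AST; Eval runs it against a concrete context.
+	prg, err := env.Program(ast)
+	if err != nil {
+		panic(err)
+	}
+	out, _, err := prg.Eval(map[string]any{
+		"account":     map[string]any{"balance": 500.0},
+		"transaction": map[string]any{"withdrawal": 100.0},
+	})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(out.Value()) // true
+}
+```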
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
+
+Disclaimer: This is not an official Google product.
diff --git a/hack/tools/vendor/cel.dev/expr/WORKSPACE b/hack/tools/vendor/cel.dev/expr/WORKSPACE
new file mode 100644
index 0000000000..b6dc9ed673
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/WORKSPACE
@@ -0,0 +1,145 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_proto",
+ sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
+ strip_prefix = "rules_proto-4.0.0-3.20.0",
+ urls = [
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
+ ],
+)
+
+# googleapis as of 09/16/2024
+http_archive(
+ name = "com_google_googleapis",
+ strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
+ sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
+ urls = [
+ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
+ ],
+)
+
+# protobuf
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
+ strip_prefix = "protobuf-3.21.5",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
+)
+
+# googletest
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+ strip_prefix = "googletest-master",
+)
+
+# gflags
+http_archive(
+ name = "com_github_gflags_gflags",
+ sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
+ strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ ],
+)
+
+# glog
+http_archive(
+ name = "com_google_glog",
+ sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
+ strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ ],
+)
+
+# absl
+http_archive(
+ name = "com_google_absl",
+ strip_prefix = "abseil-cpp-master",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+switched_rules_by_language(
+ name = "com_google_googleapis_imports",
+ cc = True,
+)
+
+# Do *not* call *_dependencies(), etc, yet. See comment at the end.
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_api",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/api",
+ sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_rpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/rpc",
+ sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# gRPC deps
+go_repository(
+ name = "org_golang_google_grpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/grpc",
+ tag = "v1.49.0",
+)
+
+go_repository(
+ name = "org_golang_x_net",
+ importpath = "golang.org/x/net",
+ sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
+ version = "v0.0.0-20190311183353-d8887717615a",
+)
+
+go_repository(
+ name = "org_golang_x_text",
+ importpath = "golang.org/x/text",
+ sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
+ version = "v0.3.2",
+)
+
+# Run the dependencies at the end. These will silently try to import some
+# of the above repositories but at different versions, so ours must come first.
+go_rules_dependencies()
+go_register_toolchains(version = "1.19.1")
+gazelle_dependencies()
+rules_proto_dependencies()
+rules_proto_toolchains()
+protobuf_deps()
diff --git a/hack/tools/vendor/cel.dev/expr/WORKSPACE.bzlmod b/hack/tools/vendor/cel.dev/expr/WORKSPACE.bzlmod
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hack/tools/vendor/cel.dev/expr/checked.pb.go b/hack/tools/vendor/cel.dev/expr/checked.pb.go
new file mode 100644
index 0000000000..bb225c8ab3
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/checked.pb.go
@@ -0,0 +1,1432 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/checked.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Type_PrimitiveType int32
+
+const (
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ Type_BOOL Type_PrimitiveType = 1
+ Type_INT64 Type_PrimitiveType = 2
+ Type_UINT64 Type_PrimitiveType = 3
+ Type_DOUBLE Type_PrimitiveType = 4
+ Type_STRING Type_PrimitiveType = 5
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type Type_WellKnownType int32
+
+const (
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ Type_ANY Type_WellKnownType = 1
+ Type_TIMESTAMP Type_WellKnownType = 2
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
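+
+// Example (an illustrative sketch, not emitted by protoc-gen-go): the oneof is
+// populated by wrapping one of the structs above in the TypeKind field and is
+// read back through the typed getters, e.g. from a package importing
+// cel.dev/expr:
+//
+//	t := &expr.Type{TypeKind: &expr.Type_Primitive{Primitive: expr.Type_STRING}}
+//	_ = t.GetPrimitive() // expr.Type_STRING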
+
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+var File_cel_expr_checked_proto protoreflect.FileDescriptor
+
+var file_cel_expr_checked_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49,
+ 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59,
+ 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c,
+ 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63,
+ 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f,
+ 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49,
+ 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64,
+ 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63,
+ 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_checked_proto_rawDescOnce sync.Once
+ file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc
+)
+
+func file_cel_expr_checked_proto_rawDescGZIP() []byte {
+ file_cel_expr_checked_proto_rawDescOnce.Do(func() {
+ file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData)
+ })
+ return file_cel_expr_checked_proto_rawDescData
+}
+
+var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cel_expr_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr
+ (*Type)(nil), // 3: cel.expr.Type
+ (*Decl)(nil), // 4: cel.expr.Decl
+ (*Reference)(nil), // 5: cel.expr.Reference
+ nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry
+ nil, // 7: cel.expr.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: cel.expr.Type.ListType
+ (*Type_MapType)(nil), // 9: cel.expr.Type.MapType
+ (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: cel.expr.SourceInfo
+ (*Expr)(nil), // 16: cel.expr.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: cel.expr.Constant
+}
+var file_cel_expr_checked_proto_depIdxs = []int32{
+ 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry
+ 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry
+ 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr
+ 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType
+ 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType
+ 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType
+ 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType
+ 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType
+ 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType
+ 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type
+ 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType
+ 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl
+ 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl
+ 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant
+ 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference
+ 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type
+ 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type
+ 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type
+ 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type
+ 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type
+ 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type
+ 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type
+ 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type
+ 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant
+ 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload
+ 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type
+ 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_checked_proto_init() }
+func file_cel_expr_checked_proto_init() {
+ if File_cel_expr_checked_proto != nil {
+ return
+ }
+ file_cel_expr_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_checked_proto_goTypes,
+ DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+ EnumInfos: file_cel_expr_checked_proto_enumTypes,
+ MessageInfos: file_cel_expr_checked_proto_msgTypes,
+ }.Build()
+ File_cel_expr_checked_proto = out.File
+ file_cel_expr_checked_proto_rawDesc = nil
+ file_cel_expr_checked_proto_goTypes = nil
+ file_cel_expr_checked_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/cel.dev/expr/cloudbuild.yaml b/hack/tools/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 0000000000..c40881f122
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:7.0.1'
+ entrypoint: bazel
+ args: ['build', '...']
+ id: bazel-build
+ waitFor: ['-']
+timeout: 15m
+options:
+ machineType: 'N1_HIGHCPU_32'
diff --git a/hack/tools/vendor/cel.dev/expr/eval.pb.go b/hack/tools/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 0000000000..8f651f9cc6
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,490 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type EvalState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
+type ExprValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *ExprValue) GetKind() isExprValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x, ok := x.GetKind().(*ExprValue_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x, ok := x.GetKind().(*ExprValue_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x, ok := x.GetKind().(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+type ErrorSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*status.Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type UnknownSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+type EvalState_Result struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_cel_expr_eval_proto protoreflect.FileDescriptor
+
+var file_cel_expr_eval_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
+ 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2,
+ 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
+ 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
+ 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76,
+ 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_eval_proto_rawDescOnce sync.Once
+ file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
+)
+
+func file_cel_expr_eval_proto_rawDescGZIP() []byte {
+ file_cel_expr_eval_proto_rawDescOnce.Do(func() {
+ file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
+ })
+ return file_cel_expr_eval_proto_rawDescData
+}
+
+var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_eval_proto_goTypes = []interface{}{
+ (*EvalState)(nil), // 0: cel.expr.EvalState
+ (*ExprValue)(nil), // 1: cel.expr.ExprValue
+ (*ErrorSet)(nil), // 2: cel.expr.ErrorSet
+ (*UnknownSet)(nil), // 3: cel.expr.UnknownSet
+ (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result
+ (*Value)(nil), // 5: cel.expr.Value
+ (*status.Status)(nil), // 6: google.rpc.Status
+}
+var file_cel_expr_eval_proto_depIdxs = []int32{
+ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
+ 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
+ 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
+ 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
+ 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
+ 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_eval_proto_init() }
+func file_cel_expr_eval_proto_init() {
+ if File_cel_expr_eval_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExprValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ErrorSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnknownSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState_Result); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_eval_proto_goTypes,
+ DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+ MessageInfos: file_cel_expr_eval_proto_msgTypes,
+ }.Build()
+ File_cel_expr_eval_proto = out.File
+ file_cel_expr_eval_proto_rawDesc = nil
+ file_cel_expr_eval_proto_goTypes = nil
+ file_cel_expr_eval_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/cel.dev/expr/explain.pb.go b/hack/tools/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 0000000000..79fd5443b9
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_cel_expr_explain_proto protoreflect.FileDescriptor
+
+var file_cel_expr_explain_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/cel.dev/expr/regen_go_proto.sh b/hack/tools/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 0000000000..fdcbb3ce25
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in ${files[@]};
+do
+ dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+ echo "copying $dst"
+ $(cp $src $dst)
+done
diff --git a/hack/tools/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/hack/tools/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 0000000000..9a13479e40
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/hack/tools/vendor/cel.dev/expr/syntax.pb.go b/hack/tools/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 0000000000..48a952872e
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+var File_cel_expr_syntax_proto protoreflect.FileDescriptor
+
+var file_cel_expr_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22,
+ 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78,
+ 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78,
+ 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38,
+ 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65,
+ 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c,
+ 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab,
+ 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a,
+ 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70,
+ 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19,
+ 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65,
+ 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75,
+ 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75,
+ 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74,
+ 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f,
+ 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09,
+ 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52,
+ 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06,
+ 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61,
+ 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12,
+ 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54,
+ 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d,
+ 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43,
+ 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79,
+ 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c,
+ 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_syntax_proto_rawDescOnce sync.Once
+ file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc
+)
+
+func file_cel_expr_syntax_proto_rawDescGZIP() []byte {
+ file_cel_expr_syntax_proto_rawDescOnce.Do(func() {
+ file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData)
+ })
+ return file_cel_expr_syntax_proto_rawDescData
+}
+
+var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_cel_expr_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr
+ (*Expr)(nil), // 2: cel.expr.Expr
+ (*Constant)(nil), // 3: cel.expr.Constant
+ (*SourceInfo)(nil), // 4: cel.expr.SourceInfo
+ (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident
+ (*Expr_Select)(nil), // 6: cel.expr.Expr.Select
+ (*Expr_Call)(nil), // 7: cel.expr.Expr.Call
+ (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry
+ nil, // 12: cel.expr.SourceInfo.PositionsEntry
+ nil, // 13: cel.expr.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension
+ (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 16: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp
+}
+var file_cel_expr_syntax_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr
+ 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant
+ 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident
+ 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select
+ 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call
+ 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList
+ 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct
+ 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension
+ 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue
+ 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration
+ 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry
+ 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry
+ 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension
+ 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr
+ 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr
+ 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr
+ 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr
+ 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry
+ 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr
+ 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr
+ 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr
+ 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr
+ 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr
+ 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr
+ 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr
+ 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr
+ 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component
+ 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_syntax_proto_init() }
+func file_cel_expr_syntax_proto_init() {
+ if File_cel_expr_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_syntax_proto_goTypes,
+ DependencyIndexes: file_cel_expr_syntax_proto_depIdxs,
+ EnumInfos: file_cel_expr_syntax_proto_enumTypes,
+ MessageInfos: file_cel_expr_syntax_proto_msgTypes,
+ }.Build()
+ File_cel_expr_syntax_proto = out.File
+ file_cel_expr_syntax_proto_rawDesc = nil
+ file_cel_expr_syntax_proto_goTypes = nil
+ file_cel_expr_syntax_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/cel.dev/expr/value.pb.go b/hack/tools/vendor/cel.dev/expr/value.pb.go
new file mode 100644
index 0000000000..e5e29228c2
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/value.pb.go
@@ -0,0 +1,653 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/value.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_cel_expr_value_proto protoreflect.FileDescriptor
+
+var file_cel_expr_value_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
+ 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_value_proto_rawDescOnce sync.Once
+ file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
+)
+
+func file_cel_expr_value_proto_rawDescGZIP() []byte {
+ file_cel_expr_value_proto_rawDescOnce.Do(func() {
+ file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
+ })
+ return file_cel_expr_value_proto_rawDescData
+}
+
+var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: cel.expr.Value
+ (*EnumValue)(nil), // 1: cel.expr.EnumValue
+ (*ListValue)(nil), // 2: cel.expr.ListValue
+ (*MapValue)(nil), // 3: cel.expr.MapValue
+ (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_cel_expr_value_proto_depIdxs = []int32{
+ 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
+ 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
+ 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
+ 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
+ 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
+ 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
+ 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_value_proto_init() }
+func file_cel_expr_value_proto_init() {
+ if File_cel_expr_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_value_proto_goTypes,
+ DependencyIndexes: file_cel_expr_value_proto_depIdxs,
+ MessageInfos: file_cel_expr_value_proto_msgTypes,
+ }.Build()
+ File_cel_expr_value_proto = out.File
+ file_cel_expr_value_proto_rawDesc = nil
+ file_cel_expr_value_proto_goTypes = nil
+ file_cel_expr_value_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore b/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore
index e37bb52e49..1c6218ee29 100644
--- a/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore
@@ -17,6 +17,7 @@
*.test
.vscode
+.idea/
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml b/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml
index e7b6f6800e..37dfec7c88 100644
--- a/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml
@@ -1,4 +1,4 @@
----
+version: 2
project_name: tagalign
release:
@@ -29,4 +29,4 @@ builds:
goarch: 386
- goos: freebsd
goarch: arm64
- main: ./cmd/tagalign/
\ No newline at end of file
+ main: ./cmd/tagalign/
diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/options.go b/hack/tools/vendor/github.com/4meepo/tagalign/options.go
index ddec98da73..2a78592465 100644
--- a/hack/tools/vendor/github.com/4meepo/tagalign/options.go
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/options.go
@@ -2,13 +2,6 @@ package tagalign
type Option func(*Helper)
-// WithMode specify the mode of tagalign.
-func WithMode(mode Mode) Option {
- return func(h *Helper) {
- h.mode = mode
- }
-}
-
 // WithSort enables tag sorting.
 // fixedOrder specifies the order of tags; the other tags will be sorted by name.
 // Sort is disabled by default.
diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go b/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go
index 4734b56661..8161a0aa7f 100644
--- a/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go
@@ -1,27 +1,19 @@
package tagalign
import (
+ "cmp"
"fmt"
"go/ast"
"go/token"
- "log"
"reflect"
- "sort"
+ "slices"
"strconv"
"strings"
"github.com/fatih/structtag"
-
"golang.org/x/tools/go/analysis"
)
-type Mode int
-
-const (
- StandaloneMode Mode = iota
- GolangciLintMode
-)
-
type Style int
const (
@@ -44,11 +36,14 @@ func NewAnalyzer(options ...Option) *analysis.Analyzer {
}
}
-func Run(pass *analysis.Pass, options ...Option) []Issue {
- var issues []Issue
+func Run(pass *analysis.Pass, options ...Option) {
for _, f := range pass.Files {
+ filename := getFilename(pass.Fset, f)
+ if !strings.HasSuffix(filename, ".go") {
+ continue
+ }
+
h := &Helper{
- mode: StandaloneMode,
style: DefaultStyle,
align: true,
}
@@ -63,22 +58,19 @@ func Run(pass *analysis.Pass, options ...Option) []Issue {
if !h.align && !h.sort {
// do nothing
- return nil
+ return
}
ast.Inspect(f, func(n ast.Node) bool {
h.find(pass, n)
return true
})
+
h.Process(pass)
- issues = append(issues, h.issues...)
}
- return issues
}
type Helper struct {
- mode Mode
-
style Style
align bool // whether enable tags align.
@@ -87,19 +79,6 @@ type Helper struct {
singleFields []*ast.Field
consecutiveFieldsGroups [][]*ast.Field // fields in this group, must be consecutive in struct.
- issues []Issue
-}
-
-// Issue is used to integrate with golangci-lint's inline auto fix.
-type Issue struct {
- Pos token.Position
- Message string
- InlineFix InlineFix
-}
-type InlineFix struct {
- StartCol int // zero-based
- Length int
- NewString string
}
func (w *Helper) find(pass *analysis.Pass, n ast.Node) {
@@ -159,42 +138,28 @@ func (w *Helper) find(pass *analysis.Pass, n ast.Node) {
split()
}
-func (w *Helper) report(pass *analysis.Pass, field *ast.Field, startCol int, msg, replaceStr string) {
- if w.mode == GolangciLintMode {
- iss := Issue{
- Pos: pass.Fset.Position(field.Tag.Pos()),
- Message: msg,
- InlineFix: InlineFix{
- StartCol: startCol,
- Length: len(field.Tag.Value),
- NewString: replaceStr,
- },
- }
- w.issues = append(w.issues, iss)
- }
-
- if w.mode == StandaloneMode {
- pass.Report(analysis.Diagnostic{
- Pos: field.Tag.Pos(),
- End: field.Tag.End(),
- Message: msg,
- SuggestedFixes: []analysis.SuggestedFix{
- {
- Message: msg,
- TextEdits: []analysis.TextEdit{
- {
- Pos: field.Tag.Pos(),
- End: field.Tag.End(),
- NewText: []byte(replaceStr),
- },
+func (w *Helper) report(pass *analysis.Pass, field *ast.Field, msg, replaceStr string) {
+ pass.Report(analysis.Diagnostic{
+ Pos: field.Tag.Pos(),
+ End: field.Tag.End(),
+ Message: msg,
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: msg,
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: field.Tag.Pos(),
+ End: field.Tag.End(),
+ NewText: []byte(replaceStr),
},
},
},
- })
- }
+ },
+ })
}
-func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit
+//nolint:gocognit,gocyclo,nestif
+func (w *Helper) Process(pass *analysis.Pass) {
// process grouped fields
for _, fields := range w.consecutiveFieldsGroups {
offsets := make([]int, len(fields))
@@ -220,7 +185,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit
tag, err := strconv.Unquote(field.Tag.Value)
if err != nil {
// if tag value is not a valid string, report it directly
- w.report(pass, field, column, errTagValueSyntax, field.Tag.Value)
+ w.report(pass, field, errTagValueSyntax, field.Tag.Value)
fields = removeField(fields, i)
continue
}
@@ -228,7 +193,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit
tags, err := structtag.Parse(tag)
if err != nil {
// if tag value is not a valid struct tag, report it directly
- w.report(pass, field, column, err.Error(), field.Tag.Value)
+ w.report(pass, field, err.Error(), field.Tag.Value)
fields = removeField(fields, i)
continue
}
@@ -241,7 +206,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit
cp[i] = tag
}
notSortedTagsGroup = append(notSortedTagsGroup, cp)
- sortBy(w.fixedTagOrder, tags)
+ sortTags(w.fixedTagOrder, tags)
}
for _, t := range tags.Tags() {
addKey(t.Key)
@@ -252,7 +217,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit
}
if w.sort && StrictStyle == w.style {
- sortAllKeys(w.fixedTagOrder, uniqueKeys)
+ sortKeys(w.fixedTagOrder, uniqueKeys)
maxTagNum = len(uniqueKeys)
}
@@ -340,27 +305,26 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit
msg := "tag is not aligned, should be: " + unquoteTag
- w.report(pass, field, offsets[i], msg, newTagValue)
+ w.report(pass, field, msg, newTagValue)
}
}
// process single fields
for _, field := range w.singleFields {
- column := pass.Fset.Position(field.Tag.Pos()).Column - 1
tag, err := strconv.Unquote(field.Tag.Value)
if err != nil {
- w.report(pass, field, column, errTagValueSyntax, field.Tag.Value)
+ w.report(pass, field, errTagValueSyntax, field.Tag.Value)
continue
}
tags, err := structtag.Parse(tag)
if err != nil {
- w.report(pass, field, column, err.Error(), field.Tag.Value)
+ w.report(pass, field, err.Error(), field.Tag.Value)
continue
}
originalTags := append([]*structtag.Tag(nil), tags.Tags()...)
if w.sort {
- sortBy(w.fixedTagOrder, tags)
+ sortTags(w.fixedTagOrder, tags)
}
newTagValue := fmt.Sprintf("`%s`", tags.String())
@@ -371,85 +335,47 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit
msg := "tag is not aligned , should be: " + tags.String()
- w.report(pass, field, column, msg, newTagValue)
+ w.report(pass, field, msg, newTagValue)
}
}
-// Issues returns all issues found by the analyzer.
-// It is used to integrate with golangci-lint.
-func (w *Helper) Issues() []Issue {
- log.Println("tagalign 's Issues() should only be called in golangci-lint mode")
- return w.issues
-}
-
-// sortBy sorts tags by fixed order.
+// sortTags sorts tags by fixed order.
// If a tag is not in the fixed order, it will be sorted by name.
-func sortBy(fixedOrder []string, tags *structtag.Tags) {
- // sort by fixed order
- sort.Slice(tags.Tags(), func(i, j int) bool {
- ti := tags.Tags()[i]
- tj := tags.Tags()[j]
-
- oi := findIndex(fixedOrder, ti.Key)
- oj := findIndex(fixedOrder, tj.Key)
-
- if oi == -1 && oj == -1 {
- return ti.Key < tj.Key
- }
-
- if oi == -1 {
- return false
- }
-
- if oj == -1 {
- return true
- }
-
- return oi < oj
+func sortTags(fixedOrder []string, tags *structtag.Tags) {
+ slices.SortFunc(tags.Tags(), func(a, b *structtag.Tag) int {
+ return compareByFixedOrder(fixedOrder)(a.Key, b.Key)
})
}
-func sortAllKeys(fixedOrder []string, keys []string) {
- sort.Slice(keys, func(i, j int) bool {
- oi := findIndex(fixedOrder, keys[i])
- oj := findIndex(fixedOrder, keys[j])
+func sortKeys(fixedOrder []string, keys []string) {
+ slices.SortFunc(keys, compareByFixedOrder(fixedOrder))
+}
+
+func compareByFixedOrder(fixedOrder []string) func(a, b string) int {
+ return func(a, b string) int {
+ oi := slices.Index(fixedOrder, a)
+ oj := slices.Index(fixedOrder, b)
if oi == -1 && oj == -1 {
- return keys[i] < keys[j]
+ return strings.Compare(a, b)
}
if oi == -1 {
- return false
+ return 1
}
if oj == -1 {
- return true
+ return -1
}
- return oi < oj
- })
-}
-
-func findIndex(s []string, e string) int {
- for i, a := range s {
- if a == e {
- return i
- }
+ return cmp.Compare(oi, oj)
}
- return -1
}
func alignFormat(length int) string {
return "%" + fmt.Sprintf("-%ds", length)
}
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
func removeField(fields []*ast.Field, index int) []*ast.Field {
if index < 0 || index >= len(fields) {
return fields
@@ -457,3 +383,12 @@ func removeField(fields []*ast.Field, index int) []*ast.Field {
return append(fields[:index], fields[index+1:]...)
}
+
+func getFilename(fset *token.FileSet, file *ast.File) string {
+ filename := fset.PositionFor(file.Pos(), true).Filename
+ if !strings.HasSuffix(filename, ".go") {
+ return fset.PositionFor(file.Pos(), false).Filename
+ }
+
+ return filename
+}
diff --git a/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go
index 5507d95462..703cc1c39f 100644
--- a/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go
+++ b/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go
@@ -125,7 +125,7 @@ const (
)
func (n *nilNil) isDangerNilType(t types.Type) (bool, zeroValue) {
- switch v := t.(type) {
+ switch v := types.Unalias(t).(type) {
case *types.Pointer:
return n.checkedTypes.Contains(ptrType), zeroValueNil
diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go
index 53c74ac453..1464fd640b 100644
--- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go
+++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go
@@ -48,7 +48,7 @@ func (checker EncodedCompare) Check(pass *analysis.Pass, call *CallMeta) *analys
switch {
case aIsExplicitJSON, bIsExplicitJSON, isJSONStyleExpr(pass, a), isJSONStyleExpr(pass, b):
proposed = "JSONEq"
- case isYAMLStyleExpr(a), isYAMLStyleExpr(b):
+ case isYAMLStyleExpr(pass, a), isYAMLStyleExpr(pass, b):
proposed = "YAMLEq"
}
diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go
index 896b6bf5fa..7ff4de470a 100644
--- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go
+++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go
@@ -115,7 +115,11 @@ func (checker Formatter) checkFmtAssertion(pass *analysis.Pass, call *CallMeta)
func isPrintfLikeCall(pass *analysis.Pass, call *CallMeta) (int, bool) {
msgAndArgsPos := getMsgAndArgsPosition(call.Fn.Signature)
- if msgAndArgsPos < 0 {
+ if msgAndArgsPos <= 0 {
+ return -1, false
+ }
+
+ if !(len(call.ArgsRaw) > msgAndArgsPos && hasStringType(pass, call.ArgsRaw[msgAndArgsPos])) {
return -1, false
}
@@ -123,7 +127,7 @@ func isPrintfLikeCall(pass *analysis.Pass, call *CallMeta) (int, bool) {
return -1, false
}
- return msgAndArgsPos, len(call.ArgsRaw) > msgAndArgsPos
+ return msgAndArgsPos, true
}
func assertHasFormattedAnalogue(pass *analysis.Pass, call *CallMeta) bool {
diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go
index 9b43e914cc..b4bb563219 100644
--- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go
+++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go
@@ -166,6 +166,12 @@ func hasBytesType(pass *analysis.Pass, e ast.Expr) bool {
return ok && el.Kind() == types.Uint8
}
+// hasStringType returns true if the expression is of `string` type.
+func hasStringType(pass *analysis.Pass, e ast.Expr) bool {
+ basicType, ok := pass.TypesInfo.TypeOf(e).(*types.Basic)
+ return ok && basicType.Kind() == types.String
+}
+
// untype returns v from type(v) expression or v itself if there is no type conversion.
func untype(e ast.Expr) ast.Expr {
ce, ok := e.(*ast.CallExpr)
diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go
index 35a497a724..c366f85635 100644
--- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go
+++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go
@@ -11,13 +11,15 @@ import (
)
var (
+ wordsRe = regexp.MustCompile(`[A-Z]+(?:[a-z]*|$)|[a-z]+`) // NOTE(a.telyshev): ChatGPT.
+
jsonIdentRe = regexp.MustCompile(`json|JSON|Json`)
- yamlIdentRe = regexp.MustCompile(`yaml|YAML|Yaml|yml|YML|Yml`)
+ yamlWordRe = regexp.MustCompile(`yaml|YAML|Yaml|^(yml|YML|Yml)$`)
)
func isJSONStyleExpr(pass *analysis.Pass, e ast.Expr) bool {
if isIdentNamedAfterPattern(jsonIdentRe, e) {
- return true
+ return hasBytesType(pass, e) || hasStringType(pass, e)
}
if t, ok := pass.TypesInfo.Types[e]; ok && t.Value != nil {
@@ -35,6 +37,20 @@ func isJSONStyleExpr(pass *analysis.Pass, e ast.Expr) bool {
return false
}
-func isYAMLStyleExpr(e ast.Expr) bool {
- return isIdentNamedAfterPattern(yamlIdentRe, e)
+func isYAMLStyleExpr(pass *analysis.Pass, e ast.Expr) bool {
+ id, ok := e.(*ast.Ident)
+ return ok && (hasBytesType(pass, e) || hasStringType(pass, e)) && hasWordAfterPattern(id.Name, yamlWordRe)
+}
+
+func hasWordAfterPattern(s string, re *regexp.Regexp) bool {
+ for _, w := range splitIntoWords(s) {
+ if re.MatchString(w) {
+ return true
+ }
+ }
+ return false
+}
+
+func splitIntoWords(s string) []string {
+ return wordsRe.FindAllString(s, -1)
}
diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go
index a8812e6d01..23b673428e 100644
--- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go
+++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go
@@ -133,7 +133,7 @@ func BindToFlags(cfg *Config, fs *flag.FlagSet) {
"to enable go vet's printf checks")
fs.BoolVar(&cfg.Formatter.RequireFFuncs,
"formatter.require-f-funcs", false,
- "to require f-assertions if format string is used")
+ "to require f-assertions (e.g. assert.Equalf) if format string is used, even if there are no variable-length variables.")
fs.BoolVar(&cfg.GoRequire.IgnoreHTTPHandlers,
"go-require.ignore-http-handlers", false,
diff --git a/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go b/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go
index a65efbba89..7b88bf56e9 100644
--- a/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go
+++ b/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go
@@ -59,7 +59,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
{
Pos: assignStmt.Pos(),
End: assignStmt.End(),
- NewText: []byte(suggested),
+ NewText: suggested,
},
},
})
@@ -215,10 +215,10 @@ func getRootIdent(pass *analysis.Pass, node ast.Node) *ast.Ident {
}
// render returns the pretty-print of the given node
-func render(fset *token.FileSet, x interface{}) (string, error) {
+func render(fset *token.FileSet, x interface{}) ([]byte, error) {
var buf bytes.Buffer
if err := printer.Fprint(&buf, fset, x); err != nil {
- return "", fmt.Errorf("printing node: %w", err)
+ return nil, fmt.Errorf("printing node: %w", err)
}
- return buf.String(), nil
+ return buf.Bytes(), nil
}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/.custom-gcl.yml b/hack/tools/vendor/github.com/JoelSpeed/kal/.custom-gcl.yml
new file mode 100644
index 0000000000..f31f0effa8
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/.custom-gcl.yml
@@ -0,0 +1,6 @@
+version: v1.62.0
+name: golangci-kal
+destination: ./bin
+plugins:
+- module: 'github.com/JoelSpeed/kal'
+ path: ./
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/.gitignore b/hack/tools/vendor/github.com/JoelSpeed/kal/.gitignore
new file mode 100644
index 0000000000..1fc7cb9297
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/.gitignore
@@ -0,0 +1,21 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+bin
+testbin/*
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# editor and IDE paraphernalia
+.idea
+*.swp
+*.swo
+*~
+.vscode/
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/.golangci.yaml b/hack/tools/vendor/github.com/JoelSpeed/kal/.golangci.yaml
new file mode 100644
index 0000000000..9b090a0113
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/.golangci.yaml
@@ -0,0 +1,97 @@
+linters:
+ disable-all: true
+ # Enable specific linter
+ # https://golangci-lint.run/usage/linters/#enabled-by-default-linters
+ enable:
+ # Default linters
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - typecheck
+ - unused
+ # Additional linters
+ - asciicheck
+ - bidichk
+ - bodyclose
+ - contextcheck
+ - cyclop
+ - dogsled
+ - dupl
+ - durationcheck
+ - errname
+ - errorlint
+ - exhaustive
+ - exportloopref
+ - forcetypeassert
+ - funlen
+ - gochecknoglobals
+ - gocognit
+ - goconst
+ - gocritic
+ - gocyclo
+ - godot
+ - goerr113
+ - gofmt
+ - goimports
+ - goprintffuncname
+ - gosec
+ - importas
+ - makezero
+ - misspell
+ - nakedret
+ - nestif
+ - nilerr
+ - nilnil
+ - nlreturn
+ - noctx
+ - nolintlint
+ - prealloc
+ - predeclared
+ - revive
+ - stylecheck
+ - tagliatelle
+ - tenv
+ - unconvert
+ - unparam
+ - wastedassign
+ - whitespace
+ - wrapcheck
+ - wsl
+linters-settings:
+ nlreturn:
+ block-size: 2
+ revive:
+ confidence: 0
+ rules:
+ - name: exported
+ severity: warning
+ disabled: false
+ arguments:
+ - "checkPrivateReceivers"
+ - "disableStutteringCheck"
+ stylecheck:
+ # https://staticcheck.io/docs/options#checks
+ checks: ["all", "-ST1000"]
+ dot-import-whitelist:
+ - "github.com/onsi/ginkgo/v2"
+ - "github.com/onsi/gomega"
+issues:
+ exclude:
+ - Analyzer is a global variable
+ exclude-use-default: false
+ exclude-rules:
+ # Exclude some linters from running on tests files.
+ - path: _test\.go
+ linters:
+ - gocyclo
+ - dupl
+ - gosec
+ - gochecknoglobals
+ - goerr113
+ - funlen
+ - path: testdata
+ linters:
+ - all
+
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/LICENSE b/hack/tools/vendor/github.com/JoelSpeed/kal/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/Makefile b/hack/tools/vendor/github.com/JoelSpeed/kal/Makefile
new file mode 100644
index 0000000000..a93bd95495
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/Makefile
@@ -0,0 +1,66 @@
+PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
+GOLANGCI_LINT = go run ${PROJECT_DIR}/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint
+
+VERSION ?= $(shell git describe --always --abbrev=7)
+
+.PHONY: all
+all: build
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk commands is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# file as xyz: ## something, and then pretty-format the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help.
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+##@ Development
+
+.PHONY: fmt
+fmt: ## Run go fmt against code.
+ go fmt ./...
+
+.PHONY: vet
+vet: ## Run go vet against code.
+ go vet ./...
+
+.PHONY: vendor
+vendor: ## Ensure the vendor directory is up to date.
+ go mod tidy
+ go mod vendor
+ go mod verify
+
+.PHONY: lint
+lint: ## Run golangci-lint over the codebase.
+ ${GOLANGCI_LINT} run ./... --timeout 5m -v
+
+.PHONY: test
+test: fmt vet unit ## Run tests.
+
+.PHONY: unit
+unit: ## Run unit tests.
+ go test ./...
+
+##@ Build
+
+.PHONY: build
+build: fmt vet ## Build KAL binary.
+ go build -o bin/kal ./cmd/kal
+
+.PHONY: build-golangci
+build-golangci: ## Build a custom golangci-lint binary that includes KAL.
+ ${GOLANGCI_LINT} custom
+
+.PHONY: verify-%
+verify-%:
+ make $*
+ git diff --exit-code
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/README.md b/hack/tools/vendor/github.com/JoelSpeed/kal/README.md
new file mode 100644
index 0000000000..06a3c11bbb
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/README.md
@@ -0,0 +1,298 @@
+# KAL - The Kubernetes API Linter
+
+KAL is a Golang-based linter for Kubernetes API types. It checks for common mistakes and enforces best practices.
+The rules implemented by KAL are based on the [Kubernetes API Conventions][api-conventions].
+
+KAL is aimed at assisting API review: it catches the mechanical elements of review, allowing reviewers to focus on the more complex aspects of API design.
+
+[api-conventions]: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
+
+## Installation
+
+KAL currently comes in two flavours, a standalone binary, and a golangci-lint plugin.
+
+### Standalone Binary
+
+To install the standalone binary, run the following command:
+
+```shell
+go install github.com/JoelSpeed/kal/cmd/kal@latest
+```
+
+The standalone binary can be run with the following command:
+
+```shell
+kal path/to/api/types
+```
+
+`kal` currently accepts no complex configuration, and will run all checks that are enabled by default.
+
+Individual linters can be disabled with the flag corresponding to the linter name. For example, to disable the `commentstart` linter, run the following command:
+
+```shell
+kal -commentstart=false path/to/api/types
+```
+
+Where fixes are available, these can be applied automatically with the `-fix` flag.
+Note, automatic fixing is currently only available via the standalone binary, and is not available via the `golangci-lint` plugin.
+
+```shell
+kal -fix path/to/api/types
+```
+
+Other standard Golang linter flags implemented by [multichecker][multichecker] based linters are also supported.
+
+[multichecker]: https://pkg.go.dev/golang.org/x/tools/go/analysis/multichecker
+
+### Golangci-lint Plugin
+
+To install the `golangci-lint` plugin, first you must have `golangci-lint` installed.
+If you do not have `golangci-lint` installed, review the `golangci-lint` [install guide][golangci-lint-install].
+
+[golangci-lint-install]: https://golangci-lint.run/welcome/install/
+
+You will need to create a `.custom-gcl.yml` file to describe the custom linters you want to run. The following is an example of a `.custom-gcl.yml` file:
+
+```yaml
+version: v1.62.0
+name: golangci-kal
+destination: ./bin
+plugins:
+- module: 'github.com/JoelSpeed/kal'
+ version: 'v0.0.0' # Replace with the latest version
+```
+
+Once you have created the custom configuration file, you can run the following command to build the custom `golangci-kal` binary:
+
+```shell
+golangci-lint custom
+```
+
+The output binary will be a combination of the initial `golangci-lint` binary and the KAL linters.
+This means that you can use any of the standard `golangci-lint` configuration or flags to run the binary, and your configuration may also include the KAL linters.
+
+If you wish to only use the KAL linters, you can configure your `.golangci.yml` file to only run the KAL linters:
+
+```yaml
+linters-settings:
+ custom:
+ kal:
+ type: "module"
+ description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices.
+ settings:
+ linters: {}
+ lintersConfig: {}
+linters:
+ disable-all: true
+ enable:
+ - kal
+
+# To only run KAL on specific path
+issues:
+ exclude-rules:
+ - path-except: "api/*"
+ linters:
+ - kal
+```
+
+The settings for KAL are based on the [GolangCIConfig][golangci-config-struct] struct and allow for finer control over the linter rules.
+This finer control over linter rules is not currently available outside of the plugin-based version of KAL.
+
+If you wish to use the KAL linters in conjunction with other linters, you can enable the KAL linters in the `.golangci.yml` file by ensuring that `kal` is in the `linters.enable` list.
+To provide further configuration, add the `custom.kal` section to your `linters-settings` as per the example above.
+
+[golangci-config-struct]: https://pkg.go.dev/github.com/JoelSpeed/kal/pkg/config#GolangCIConfig
+
+#### VSCode integration
+
+Since VSCode already integrates with `golangci-lint` via the [Go][vscode-go] extension, you can use the `golangci-kal` binary as a linter in VSCode.
+If your project authors are already using VSCode and have the configuration to lint their code when saving, this can be a seamless integration.
+
+Ensure that your project setup includes building the `golangci-kal` binary, and then configure the `go.lintTool` and `go.alternateTools` settings in your project `.vscode/settings.json` file.
+
+[vscode-go]: https://code.visualstudio.com/docs/languages/go
+
+```json
+{
+ "go.lintTool": "golangci-lint",
+ "go.alternateTools": {
+ "golangci-lint": "${workspaceFolder}/bin/golangci-kal",
+ }
+}
+```
+
+Alternatively, you can also replace the binary with a script that runs the `golangci-kal` binary,
+allowing for customisation or automatic compilation of the project should it not already exist.
+
+```json
+{
+ "go.lintTool": "golangci-lint",
+ "go.alternateTools": {
+ "golangci-lint": "${workspaceFolder}/hack/golangci-lint.sh",
+ }
+}
+```
+
+# Linters
+
+## Conditions
+
+The `conditions` linter checks that `Conditions` fields in the API types are correctly formatted.
+The `Conditions` field should be a slice of `metav1.Condition` with the following tags and markers:
+
+```go
+// +listType=map
+// +listMapKey=type
+// +patchStrategy=merge
+// +patchMergeKey=type
+// +optional
+Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,opt,name=conditions"`
+```
+
+Conditions are idiomatically the first field within the status struct, and the linter will highlight when the Conditions are not the first field.
+
+### Configuration
+
+```yaml
+lintersConfig:
+ conditions:
+ isFirstField: Warn | Ignore # The policy for the Conditions field being the first field. Defaults to `Warn`.
+ useProtobuf: SuggestFix | Warn | Ignore # The policy for the protobuf tag on the Conditions field. Defaults to `SuggestFix`.
+```
+
+### Fixes (via standalone binary only)
+
+The `conditions` linter can automatically fix the tags on the `Conditions` field.
+When the tags do not match the expected format, the linter will suggest updating them to match.
+
+For CRDs, protobuf tags are not expected. By setting the `useProtobuf` configuration to `Ignore`, the linter will not suggest to add the protobuf tag to the `Conditions` field tags.
+
+The linter will also suggest to add missing markers.
+If any of the 5 markers in the example above are missing, the linter will suggest to add them directly above the field.
+
+## CommentStart
+
+The `commentstart` linter checks that all comments in the API types start with the serialized form of the type they are commenting on.
+This helps to ensure that generated documentation reflects the most common usage of the field, the serialized YAML form.
+
+### Fixes (via standalone binary only)
+
+The `commentstart` linter can automatically fix comments that do not start with the serialized form of the type.
+
+When the `json` tag is present, and matches the first word of the field comment in all but casing, the linter will suggest that the comment be updated to match the `json` tag.
+
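+For illustration, a hypothetical field (the name is invented, not taken from the linter's test data) before and after the suggested fix:
+
+```go
+// Before: the godoc starts with the Go field name.
+// FooBar is the desired number of widgets.
+FooBar int32 `json:"fooBar"`
+
+// After: the godoc starts with the serialized form from the json tag.
+// fooBar is the desired number of widgets.
+FooBar int32 `json:"fooBar"`
+```
+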
+## Integers
+
+The `integers` linter checks for usage of unsupported integer types.
+Only `int32` and `int64` types should be used in APIs; other integer types, including unsigned integers, are forbidden.
+
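+As a sketch (field names invented), the linter would accept the first field below and flag the second:
+
+```go
+// replicas uses a supported integer width.
+Replicas int32 `json:"replicas"`
+
+// port would be flagged: unsigned integers are forbidden, use int32 instead.
+Port uint16 `json:"port"`
+```
+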
+## JSONTags
+
+The `jsontags` linter checks that all fields in the API types have a `json` tag, and that those tags are correctly formatted.
+The `json` tag for a field within a Kubernetes API type should use a camel case version of the field name.
+
+The `jsontags` linter checks the tag name against the regex `"^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$"` which allows consecutive upper case characters, to allow for acronyms, e.g. `requestTTL`.
+
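+For example, tags along these lines (field names invented) would satisfy the default regex:
+
+```go
+// replicaCount uses a lower camel case tag.
+ReplicaCount int32 `json:"replicaCount"`
+
+// requestTTL keeps the acronym in consecutive upper case, which the default regex allows.
+RequestTTL int32 `json:"requestTTL"`
+```
+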
+### Configuration
+
+```yaml
+lintersConfig:
+ jsonTags:
+ jsonTagRegex: "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$" # Provide a custom regex, which the json tag must match.
+```
+
+## MaxLength
+
+The `maxlength` linter checks that string and array fields in the API are bounded by a maximum length.
+
+For strings, this means they have a `+kubebuilder:validation:MaxLength` marker.
+
+For arrays, this means they have a `+kubebuilder:validation:MaxItems` marker.
+
+For arrays of strings, the array element should also have a `+kubebuilder:validation:MaxLength` marker if the array element is a type alias,
+or `+kubebuilder:validation:items:MaxLength` if the array element is the built-in string type.
+
+Adding maximum lengths to strings and arrays not only ensures that the API cannot be abused (e.g. to store overly large data, or as a denial-of-service vector),
+but also allows CEL validation cost estimations to be kept within reasonable bounds.
+
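+A sketch of bounded string and array fields (names and limits here are illustrative only):
+
+```go
+// +kubebuilder:validation:MaxLength=256
+// +optional
+Hostname string `json:"hostname,omitempty"`
+
+// +kubebuilder:validation:MaxItems=10
+// +kubebuilder:validation:items:MaxLength=256
+// +optional
+Hostnames []string `json:"hostnames,omitempty"`
+```
+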
+## NoBools
+
+The `nobools` linter checks that fields in the API types do not contain a `bool` type.
+
+Booleans are limited and do not evolve well over time.
+It is recommended instead to create a string alias with meaningful values, as an enum.
+
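+For example, instead of a `bool`, a string alias along these lines (names invented) leaves room for new values later:
+
+```go
+// FooPolicy configures the foo behaviour.
+// +kubebuilder:validation:Enum=Enabled;Disabled
+type FooPolicy string
+
+const (
+	FooPolicyEnabled  FooPolicy = "Enabled"
+	FooPolicyDisabled FooPolicy = "Disabled"
+)
+```
+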
+## Nophase
+
+The `nophase` linter checks that fields in the API types are not named 'Phase', and do not contain 'Phase' as a substring, e.g. MachinePhase.
+
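+As a sketch, a field like the following (name invented) would be flagged; conditions are the preferred alternative:
+
+```go
+// phase would be reported by the nophase linter.
+Phase string `json:"phase"`
+```
+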
+## OptionalOrRequired
+
+The `optionalorrequired` linter checks that all fields in the API types are either optional or required, and are marked explicitly as such.
+
+The linter expects to find a comment marker `// +optional` or `// +required` within the comment for the field.
+
+It also supports the `// +kubebuilder:validation:Optional` and `// +kubebuilder:validation:Required` markers, but will suggest to use the `// +optional` and `// +required` markers instead.
+
+If you prefer to use the Kubebuilder markers instead, you can change the preference in the configuration.
+
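+For illustration (field names invented), fields annotated with the preferred markers:
+
+```go
+// name is required, so it is neither a pointer nor omitempty.
+// +required
+Name string `json:"name"`
+
+// description may be left unset.
+// +optional
+Description string `json:"description,omitempty"`
+```
+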
+### Configuration
+
+```yaml
+lintersConfig:
+ optionalOrRequired:
+ preferredOptionalMarker: optional | kubebuilder:validation:Optional # The preferred optional marker to use, fixes will suggest to use this marker. Defaults to `optional`.
+ preferredRequiredMarker: required | kubebuilder:validation:Required # The preferred required marker to use, fixes will suggest to use this marker. Defaults to `required`.
+```
+
+### Fixes (via standalone binary only)
+
+The `optionalorrequired` linter can automatically fix fields that are using the incorrect form of either the optional or required marker.
+
+It will also remove the secondary marker where both the preferred and secondary marker are present on a field.
+
+## RequiredFields
+
+The `requiredfields` linter checks that fields that are marked as required follow the convention of not being pointers,
+and not having an `omitempty` value in their `json` tag.
+
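+A sketch of the convention (field names invented): the first field conforms, the second would be reported:
+
+```go
+// +required
+Name string `json:"name"`
+
+// +required
+Size *int32 `json:"size,omitempty"` // flagged: pointer and omitempty on a required field
+```
+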
+### Configuration
+
+```yaml
+lintersConfig:
+ requiredFields:
+ pointerPolicy: Warn | SuggestFix # The policy for pointers in required fields. Defaults to `SuggestFix`.
+```
+
+### Fixes (via standalone binary only)
+
+The `requiredfields` linter can automatically fix fields that are marked as required, but are pointers.
+
+It will suggest to remove the pointer from the field, and update the `json` tag to remove the `omitempty` value.
+
+If you prefer not to suggest fixes for pointers in required fields, you can change the `pointerPolicy` to `Warn`.
+The linter will then only suggest to remove the `omitempty` value from the `json` tag.
+
+## StatusSubresource
+
+The `statussubresource` linter checks that the status subresource is configured correctly for
+structs marked with the `kubebuilder:object:root:=true` marker. The configuration is correct when
+the two always appear together: a struct with a status field must carry the `kubebuilder:subresource:status`
+marker, and a struct with that marker must have a status field.
+
+This linter is not enabled by default as it is only applicable to CustomResourceDefinitions.
+
+### Fixes (via standalone binary only)
+
+In the case where there is a status field present but no `kubebuilder:subresource:status` marker, the
+linter will suggest adding the comment `// +kubebuilder:subresource:status` above the struct.
+
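+As an illustrative sketch (type names invented), a correctly configured root type looks like:
+
+```go
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+type Foo struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   FooSpec   `json:"spec,omitempty"`
+	Status FooStatus `json:"status,omitempty"`
+}
+```
+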
+# Contributing
+
+New linters can be added by following the [New Linter][new-linter] guide.
+
+[new-linter]: docs/new-linter.md
+
+# License
+
+KAL is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/cmd/kal/main.go b/hack/tools/vendor/github.com/JoelSpeed/kal/cmd/kal/main.go
new file mode 100644
index 0000000000..410053395e
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/cmd/kal/main.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ kalanalysis "github.com/JoelSpeed/kal/pkg/analysis"
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis/multichecker"
+)
+
+func main() {
+ analyzers, err := kalanalysis.NewRegistry().InitializeLinters(config.Linters{}, config.LintersConfig{})
+ if err != nil {
+ panic(err)
+ }
+
+ multichecker.Main(
+ analyzers...,
+ )
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/doc.go
new file mode 100644
index 0000000000..3b14dbe2be
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/doc.go
@@ -0,0 +1,34 @@
+/*
+KAL is a linter for Kubernetes API types that implements API conventions and best practices.
+
+This package provides a GolangCI-Lint plugin that can be used to build a custom linter for Kubernetes API types.
+The custom golangci-lint binary can be built by checking out the KAL repository and running `make build-golangci`.
+This will generate a custom golangci-lint binary in the `bin` directory.
+
+The custom golangci-lint binary can be run with the `run` command, and the KAL linters can be enabled by setting the `kal` linter in the `.golangci.yml` configuration file.
+
+Example `.golangci.yml` configuration file:
+
+ linters-settings:
+ custom:
+ kal:
+ type: "module"
+ description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices.
+ settings:
+ linters:
+ enabled: []
+ disabled: []
+ lintersConfig:
+ jsonTags:
+ jsonTagRegex: ""
+ optionalOrRequired:
+ preferredOptionalMarker: ""
+ preferredRequiredMarker: ""
+ linters:
+ disable-all: true
+ enable:
+ - kal
+
+New linters can be added in the [github.com/JoelSpeed/kal/pkg/analysis] package.
+*/
+package kal
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/analyzer.go
new file mode 100644
index 0000000000..57e2af6783
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/analyzer.go
@@ -0,0 +1,108 @@
+package commentstart
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strings"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const name = "commentstart"
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetJSONTags = errors.New("could not get json tags")
+)
+
+// Analyzer is the analyzer for the commentstart package.
+// It checks that all struct fields in an API have a godoc, and that the godoc starts with the serialised field name.
+var Analyzer = &analysis.Analyzer{
+ Name: name,
+ Doc: "Check that all struct fields in an API have a godoc, and that the godoc starts with the serialised field name",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, extractjsontags.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+ if !ok {
+ return nil, errCouldNotGetJSONTags
+ }
+
+ // Filter to structs so that we can iterate over fields in a struct.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ sTyp, ok := n.(*ast.StructType)
+ if !ok {
+ return
+ }
+
+ if sTyp.Fields == nil {
+ return
+ }
+
+ for _, field := range sTyp.Fields.List {
+ checkField(pass, field, jsonTags)
+ }
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+func checkField(pass *analysis.Pass, field *ast.Field, jsonTags extractjsontags.StructFieldTags) {
+ if field == nil || len(field.Names) == 0 {
+ return
+ }
+
+ fieldName := field.Names[0].Name
+
+ tagInfo := jsonTags.FieldTags(field)
+
+ if tagInfo.Name == "" {
+ return
+ }
+
+ if field.Doc == nil {
+ pass.Reportf(field.Pos(), "field %s is missing godoc comment", fieldName)
+ return
+ }
+
+ firstLine := field.Doc.List[0]
+ if !strings.HasPrefix(firstLine.Text, "// "+tagInfo.Name+" ") {
+ if strings.HasPrefix(strings.ToLower(firstLine.Text), strings.ToLower("// "+tagInfo.Name+" ")) {
+ // The comment start is correct, apart from the casing, we can fix that.
+ pass.Report(analysis.Diagnostic{
+ Pos: firstLine.Pos(),
+ Message: fmt.Sprintf("godoc for field %s should start with '%s ...'", fieldName, tagInfo.Name),
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: fmt.Sprintf("should replace first word with `%s`", tagInfo.Name),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: firstLine.Pos(),
+ End: firstLine.Pos() + token.Pos(len(tagInfo.Name)) + token.Pos(4),
+ NewText: []byte("// " + tagInfo.Name + " "),
+ },
+ },
+ },
+ },
+ })
+ } else {
+ pass.Reportf(field.Doc.List[0].Pos(), "godoc for field %s should start with '%s ...'", fieldName, tagInfo.Name)
+ }
+ }
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/doc.go
new file mode 100644
index 0000000000..f50d57b0d3
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/doc.go
@@ -0,0 +1,19 @@
+/*
+commentstart is a simple analysis tool that checks if the first line of a comment is the same as the json tag.
+
+By convention in Go, comments typically start with the name of the item being described.
+In the case of field names, this would mean for a field Foo, the comment should look like:
+
+ // Foo is a field that does something.
+ Foo string `json:"foo"`
+
+However, in Kubernetes API types, the json tag is often used to generate documentation.
+In this case, the comment should start with the json tag, like so:
+
+ // foo is a field that does something.
+ Foo string `json:"foo"`
+
+This ensures that for any generated documentation, the documentation refers to the serialized field name.
+We expect most readers of Kubernetes API documentation will be more familiar with the serialized field names than the Go field names.
+*/
+package commentstart
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/initializer.go
new file mode 100644
index 0000000000..370106fd04
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/initializer.go
@@ -0,0 +1,30 @@
+package commentstart
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/analyzer.go
new file mode 100644
index 0000000000..9c48375bf2
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/analyzer.go
@@ -0,0 +1,312 @@
+package conditions
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strings"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ name = "conditions"
+
+ listTypeMarkerID = "listType"
+ listMapKeyMarkerID = "listMapKey"
+ patchStrategyMarkerID = "patchStrategy"
+ patchMergeKeyMarkerID = "patchMergeKey"
+
+ listTypeMap = "listType=map"
+ listMapKeyType = "listMapKey=type"
+ patchStrategy = "patchStrategy=merge"
+ patchMergeKeyType = "patchMergeKey=type"
+ optional = "optional"
+
+ expectedTagWithProtobufFmt = "`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,%d,rep,name=conditions\"`"
+ expectedTagWithoutProtobuf = "`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"`"
+)
+
+func init() {
+ markers.DefaultRegistry().Register(
+ listTypeMarkerID,
+ listMapKeyMarkerID,
+ patchStrategyMarkerID,
+ patchMergeKeyMarkerID,
+ optional,
+ )
+}
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetMarkers = errors.New("could not get markers")
+)
+
+type analyzer struct {
+ isFirstField config.ConditionsFirstField
+ useProtobuf config.ConditionsUseProtobuf
+}
+
+// newAnalyzer creates a new analyzer.
+func newAnalyzer(cfg config.ConditionsConfig) *analysis.Analyzer {
+ defaultConfig(&cfg)
+
+ a := &analyzer{
+ isFirstField: cfg.IsFirstField,
+ useProtobuf: cfg.UseProtobuf,
+ }
+
+ return &analysis.Analyzer{
+ Name: name,
+ Doc: `Checks that all conditions type fields conform to the required conventions.`,
+ Run: a.run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer},
+ }
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+ if !ok {
+ return nil, errCouldNotGetMarkers
+ }
+
+ // Filter to structs so that we can iterate over fields in a struct.
+ // We need a struct here so that we can tell where in the struct the field is.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ sTyp, ok := n.(*ast.StructType)
+ if !ok {
+ return
+ }
+
+ if sTyp.Fields == nil {
+ return
+ }
+
+ for i, field := range sTyp.Fields.List {
+ fieldMarkers := markersAccess.FieldMarkers(field)
+
+ a.checkField(pass, i, field, fieldMarkers)
+ }
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+func (a *analyzer) checkField(pass *analysis.Pass, index int, field *ast.Field, fieldMarkers markers.MarkerSet) {
+ if !fieldIsCalledConditions(field) {
+ return
+ }
+
+ if !isSliceMetaV1Condition(field) {
+ pass.Reportf(field.Pos(), "Conditions field must be a slice of metav1.Condition")
+ return
+ }
+
+ checkFieldMarkers(pass, field, fieldMarkers)
+ a.checkFieldTags(pass, index, field)
+
+ if a.isFirstField == config.ConditionsFirstFieldWarn && index != 0 {
+ pass.Reportf(field.Pos(), "Conditions field must be the first field in the struct")
+ }
+}
+
+func checkFieldMarkers(pass *analysis.Pass, field *ast.Field, fieldMarkers markers.MarkerSet) {
+ missingMarkers := []string{}
+
+ if !fieldMarkers.HasWithValue(listTypeMap) {
+ missingMarkers = append(missingMarkers, listTypeMap)
+ }
+
+ if !fieldMarkers.HasWithValue(listMapKeyType) {
+ missingMarkers = append(missingMarkers, listMapKeyType)
+ }
+
+ if !fieldMarkers.HasWithValue(patchStrategy) {
+ missingMarkers = append(missingMarkers, patchStrategy)
+ }
+
+ if !fieldMarkers.HasWithValue(patchMergeKeyType) {
+ missingMarkers = append(missingMarkers, patchMergeKeyType)
+ }
+
+ if !fieldMarkers.Has(optional) {
+ missingMarkers = append(missingMarkers, optional)
+ }
+
+ if len(missingMarkers) != 0 {
+ pass.Report(analysis.Diagnostic{
+ Pos: field.Pos(),
+ End: field.End(),
+ Message: "Conditions field is missing the following markers: " + strings.Join(missingMarkers, ", "),
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: "Add missing markers",
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: field.Pos(),
+ End: token.NoPos,
+ NewText: getNewMarkers(missingMarkers),
+ },
+ },
+ },
+ },
+ })
+ }
+}
+
+func getNewMarkers(missingMarkers []string) []byte {
+ var out string
+
+ for _, marker := range missingMarkers {
+ out += "// +" + marker + "\n"
+ }
+
+ return []byte(out)
+}
+
+func (a *analyzer) checkFieldTags(pass *analysis.Pass, index int, field *ast.Field) {
+ if field.Tag == nil {
+ expectedTag := getExpectedTag(a.useProtobuf, a.isFirstField, index)
+
+ pass.Report(analysis.Diagnostic{
+ Pos: field.Pos(),
+ End: field.End(),
+ Message: "Conditions field is missing tags, should be: " + expectedTag,
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: "Add missing tags",
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: field.End(),
+ End: token.NoPos,
+ NewText: []byte(expectedTag),
+ },
+ },
+ },
+ },
+ })
+
+ return
+ }
+
+ asExpected, shouldFix := tagIsAsExpected(field.Tag.Value, a.useProtobuf, a.isFirstField, index)
+ if !asExpected {
+ expectedTag := getExpectedTag(a.useProtobuf, a.isFirstField, index)
+
+ if !shouldFix {
+ pass.Reportf(field.Tag.ValuePos, "Conditions field has incorrect tags, should be: %s", expectedTag)
+ } else {
+ pass.Report(analysis.Diagnostic{
+ Pos: field.Tag.ValuePos,
+ End: field.Tag.End(),
+ Message: "Conditions field has incorrect tags, should be: " + expectedTag,
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: "Update tags",
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: field.Tag.ValuePos,
+ End: field.Tag.End(),
+ NewText: []byte(expectedTag),
+ },
+ },
+ },
+ },
+ })
+ }
+ }
+}
+
+func getExpectedTag(useProtobuf config.ConditionsUseProtobuf, isFirstField config.ConditionsFirstField, index int) string {
+ if useProtobuf == config.ConditionsUseProtobufSuggestFix || useProtobuf == config.ConditionsUseProtobufWarn {
+ i := 1
+ if isFirstField == config.ConditionsFirstFieldIgnore {
+ i = index + 1
+ }
+
+ return fmt.Sprintf(expectedTagWithProtobufFmt, i)
+ }
+
+ return expectedTagWithoutProtobuf
+}
+
+func tagIsAsExpected(tag string, useProtobuf config.ConditionsUseProtobuf, isFirstField config.ConditionsFirstField, index int) (bool, bool) {
+ switch useProtobuf {
+ case config.ConditionsUseProtobufSuggestFix:
+ return tag == getExpectedTag(config.ConditionsUseProtobufSuggestFix, isFirstField, index), true
+ case config.ConditionsUseProtobufWarn:
+ return tag == getExpectedTag(config.ConditionsUseProtobufWarn, isFirstField, index), false
+ case config.ConditionsUseProtobufIgnore:
+ return tag == getExpectedTag(config.ConditionsUseProtobufIgnore, isFirstField, index) || tag == getExpectedTag(config.ConditionsUseProtobufSuggestFix, isFirstField, index), true
+ default:
+ panic("unexpected useProtobuf value")
+ }
+}
+
+func fieldIsCalledConditions(field *ast.Field) bool {
+ if field == nil {
+ return false
+ }
+
+ return len(field.Names) != 0 && field.Names[0] != nil && field.Names[0].Name == "Conditions"
+}
+
+func isSliceMetaV1Condition(field *ast.Field) bool {
+ if field == nil {
+ return false
+ }
+
+ // Field is not an array type.
+ arr, ok := field.Type.(*ast.ArrayType)
+ if !ok {
+ return false
+ }
+
+ // Array element is not imported.
+ selector, ok := arr.Elt.(*ast.SelectorExpr)
+ if !ok {
+ return false
+ }
+
+ pkg, ok := selector.X.(*ast.Ident)
+ if !ok {
+ return false
+ }
+
+ // Array element is not imported from metav1.
+ if selector.X == nil || pkg.Name != "metav1" {
+ return false
+ }
+
+ // Array element is not a metav1.Condition.
+ if selector.Sel == nil || selector.Sel.Name != "Condition" {
+ return false
+ }
+
+ return true
+}
+
+func defaultConfig(cfg *config.ConditionsConfig) {
+ if cfg.IsFirstField == "" {
+ cfg.IsFirstField = config.ConditionsFirstFieldWarn
+ }
+
+ if cfg.UseProtobuf == "" {
+ cfg.UseProtobuf = config.ConditionsUseProtobufSuggestFix
+ }
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/doc.go
new file mode 100644
index 0000000000..456c97abf4
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/doc.go
@@ -0,0 +1,31 @@
+/*
+conditions is a linter that verifies that the conditions field within the struct is correctly defined.
+
+conditions fields in Kubernetes API types are expected to be a slice of metav1.Condition.
+This linter verifies that the field is a slice of metav1.Condition and that it is correctly
+annotated with the required markers and tags.
+
+The expected condition field should look like this:
+
+ // +listType=map
+ // +listMapKey=type
+ // +patchStrategy=merge
+ // +patchMergeKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+Where the tags and markers are incorrect, the linter will suggest fixes to improve the field definition.
+
+Conditions are also idiomatically the first item in the struct; the linter will highlight when the conditions field is not the first field in the struct.
+If this is not the desired behaviour, set the linter config option `isFirstField` to `Ignore`.
+
+Protobuf tags are required for in-tree API types, but not for CRDs.
+When linting CRD-based types, set the `useProtobuf` config option to `Ignore`.
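+
+A minimal sketch of that configuration, using the config types consumed by this linter's analyzer:
+
+	cfg := config.ConditionsConfig{
+		UseProtobuf: config.ConditionsUseProtobufIgnore,
+	}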
+*/
+package conditions
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/initializer.go
new file mode 100644
index 0000000000..1855ed87cb
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/initializer.go
@@ -0,0 +1,30 @@
+package conditions
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return newAnalyzer(cfg.Conditions), nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/doc.go
new file mode 100644
index 0000000000..d98c51013c
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/doc.go
@@ -0,0 +1,58 @@
+/*
+analysis provides a linter registry and a set of linters that can be used to analyze Go code.
+The linters in this package are focused on Kubernetes API types and implement API conventions
+and best practices.
+
+To use the linters provided by KAL, initialise an instance of the Registry and then initialize
+the linters within, by passing the required configuration.
+
+Example:
+
+ registry := analysis.NewRegistry()
+
+ // Initialize the linters
+ linters, err := registry.InitLinters(
+ config.Linters{
+ Enabled: []string{
+				"commentstart",
+ "jsontags",
+ "optionalorrequired",
+ },
+ Disabled: []string{
+ ...
+ },
+ },
+ config.LintersConfig{
+ JSONTags: config.JSONTagsConfig{
+ JSONTagRegex: `^[-_a-zA-Z0-9]+$`,
+ },
+ OptionalOrRequired: config.OptionalOrRequiredConfig{
+ PreferredOptionalMarker: optionalorrequired.OptionalMarker,
+ PreferredRequiredMarker: optionalorrequired.RequiredMarker,
+ },
+ },
+ )
+
+The provided list of analyzers can be used with `multichecker.Main()` from the `golang.org/x/tools/go/analysis/multichecker` package,
+or as part of a custom analysis pipeline, e.g. via the golangci-lint plugin system.
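+
+For example, a minimal sketch of wiring the initialized linters into a standalone binary
+(assuming InitLinters returns a slice of *analysis.Analyzer):
+
+	multichecker.Main(linters...)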
+
+Linters provided by KAL:
+ - [commentstart]: Linter to ensure that comments start with the serialized version of the field name.
+ - [jsontags]: Linter to ensure that JSON tags are present on struct fields, and that they match a given regex.
+ - [optionalorrequired]: Linter to ensure that all fields are marked as either optional or required.
+
+When adding new linters, ensure that they are added to the registry in the `NewRegistry` function.
+Linters should not depend on other linters, unless that linter has no configuration and is always enabled;
+see the helpers package.
+
+Any common, or shared functionality to extract data from types, should be added as a helper function in the helpers package.
+The available helpers are:
+ - extractjsontags: Extracts JSON tags from struct fields and returns the information in a structured format.
+ - markers: Extracts marker information from types and returns the information in a structured format.
+*/
+package analysis
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/analyzer.go
new file mode 100644
index 0000000000..0dafe82eaa
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/analyzer.go
@@ -0,0 +1,152 @@
+package extractjsontags
+
+import (
+ "errors"
+ "go/ast"
+ "go/token"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotCreateStructFieldTags = errors.New("could not create new structFieldTags")
+)
+
+// StructFieldTags is used to find information about
+// json tags on fields within a struct.
+type StructFieldTags interface {
+ FieldTags(*ast.Field) FieldTagInfo
+}
+
+type structFieldTags struct {
+ fieldTags map[*ast.Field]FieldTagInfo
+}
+
+func newStructFieldTags() StructFieldTags {
+ return &structFieldTags{
+ fieldTags: make(map[*ast.Field]FieldTagInfo),
+ }
+}
+
+func (s *structFieldTags) insertFieldTagInfo(field *ast.Field, tagInfo FieldTagInfo) {
+ s.fieldTags[field] = tagInfo
+}
+
+// FieldTags finds the tag information for the given field.
+func (s *structFieldTags) FieldTags(field *ast.Field) FieldTagInfo {
+ return s.fieldTags[field]
+}
+
+// Analyzer is the analyzer for the extractjsontags package.
+// It iterates over all fields in structs and extracts their json tags.
+var Analyzer = &analysis.Analyzer{
+ Name: "extractjsontags",
+ Doc: "Iterates over all fields in structs and extracts their json tags.",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ ResultType: reflect.TypeOf(newStructFieldTags()),
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+	// Filter to fields so that we can extract the json tags from each field.
+ nodeFilter := []ast.Node{
+ (*ast.Field)(nil),
+ }
+
+ results, ok := newStructFieldTags().(*structFieldTags)
+ if !ok {
+ return nil, errCouldNotCreateStructFieldTags
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ field, ok := n.(*ast.Field)
+ if !ok {
+ return
+ }
+
+ results.insertFieldTagInfo(field, extractTagInfo(field.Tag))
+ })
+
+ return results, nil
+}
+
+func extractTagInfo(tag *ast.BasicLit) FieldTagInfo {
+ if tag == nil || tag.Value == "" {
+ return FieldTagInfo{Missing: true}
+ }
+
+ rawTag, err := strconv.Unquote(tag.Value)
+ if err != nil {
+ // This means the way AST is treating tags has changed.
+ panic(err)
+ }
+
+ tagValue, ok := reflect.StructTag(rawTag).Lookup("json")
+ if !ok {
+ return FieldTagInfo{Missing: true}
+ }
+
+ if tagValue == "" {
+ return FieldTagInfo{}
+ }
+
+ pos := tag.Pos() + token.Pos(strings.Index(tag.Value, tagValue))
+ end := pos + token.Pos(len(tagValue))
+
+ tagValues := strings.Split(tagValue, ",")
+
+ if len(tagValues) == 2 && tagValues[0] == "" && tagValues[1] == "inline" {
+ return FieldTagInfo{
+ Inline: true,
+ RawValue: tagValue,
+ Pos: pos,
+ End: end,
+ }
+ }
+
+ tagName := tagValues[0]
+
+ return FieldTagInfo{
+ Name: tagName,
+ OmitEmpty: len(tagValues) == 2 && tagValues[1] == "omitempty",
+ RawValue: tagValue,
+ Pos: pos,
+ End: end,
+ }
+}
+
+// FieldTagInfo contains information about a field's json tag.
+// This is used to pass information about a field's json tag between analyzers.
+type FieldTagInfo struct {
+ // Name is the name of the field extracted from the json tag.
+ Name string
+
+ // OmitEmpty is true if the field has the omitempty option in the json tag.
+ OmitEmpty bool
+
+ // Inline is true if the field has the inline option in the json tag.
+ Inline bool
+
+ // Missing is true when the field had no json tag.
+ Missing bool
+
+ // RawValue is the raw value from the json tag.
+ RawValue string
+
+ // Pos marks the starting position of the json tag value.
+ Pos token.Pos
+
+ // End marks the end of the json tag value.
+ End token.Pos
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/doc.go
new file mode 100644
index 0000000000..3fa3a58a65
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/doc.go
@@ -0,0 +1,40 @@
+/*
+extractjsontags is a helper package that extracts JSON tags from a struct field.
+
+It returns data behind the interface [StructFieldTags] which is used to find information about JSON tags on fields within a struct.
+
+Data about the json tags for a field within a struct can be accessed by calling the `FieldTags` method on the interface.
+
+Example:
+
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ jsonTags := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+
+ // Filter to fields so that we can iterate over fields in a struct.
+ nodeFilter := []ast.Node{
+ (*ast.Field)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ field, ok := n.(*ast.Field)
+ if !ok {
+ return
+ }
+
+ tagInfo := jsonTags.FieldTags(field)
+
+ ...
+
+ })
+
+For each field, tag information is returned as a [FieldTagInfo] struct.
+This can be used to determine the name of the field as per the json tag, and whether the
+field is inline, has omitempty, or is missing completely.
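+
+For example (illustrative), a field declared as:
+
+	Foo string `json:"foo,omitempty"`
+
+would be reported with Name "foo" and OmitEmpty set to true.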
+*/
+package extractjsontags
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/analyzer.go
new file mode 100644
index 0000000000..b6cf847c80
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/analyzer.go
@@ -0,0 +1,393 @@
+package markers
+
+import (
+ "errors"
+ "go/ast"
+ "go/token"
+ "reflect"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+// UnnamedExpression is the expression key used
+// when parsing markers that don't have a specific
+// named expression.
+//
+// An example of a marker without a named expression
+// is "kubebuilder:default:=foo".
+//
+// An example of a marker with named expressions
+// is "kubebuilder:validation:XValidation:rule='...',message='...'".
+const UnnamedExpression = ""
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotCreateMarkers = errors.New("could not create markers")
+)
+
+// Markers allows access to markers extracted from the
+// go types.
+type Markers interface {
+	// FieldMarkers returns markers associated with the field.
+ FieldMarkers(*ast.Field) MarkerSet
+
+	// StructMarkers returns markers associated with the given struct.
+ StructMarkers(*ast.StructType) MarkerSet
+
+	// TypeMarkers returns markers associated with the given type.
+ TypeMarkers(*ast.TypeSpec) MarkerSet
+}
+
+func newMarkers() Markers {
+ return &markers{
+ fieldMarkers: make(map[*ast.Field]MarkerSet),
+ structMarkers: make(map[*ast.StructType]MarkerSet),
+ typeMarkers: make(map[*ast.TypeSpec]MarkerSet),
+ }
+}
+
+// markers implements the Markers interface, storing the extracted marker sets.
+type markers struct {
+ fieldMarkers map[*ast.Field]MarkerSet
+ structMarkers map[*ast.StructType]MarkerSet
+ typeMarkers map[*ast.TypeSpec]MarkerSet
+}
+
+// FieldMarkers returns the appropriate MarkerSet for the field,
+// or an empty MarkerSet if the appropriate MarkerSet isn't found.
+func (m *markers) FieldMarkers(field *ast.Field) MarkerSet {
+ fMarkers := m.fieldMarkers[field]
+
+ return NewMarkerSet(fMarkers.UnsortedList()...)
+}
+
+// StructMarkers returns the appropriate MarkerSet if found, else
+// it returns an empty MarkerSet.
+func (m *markers) StructMarkers(sTyp *ast.StructType) MarkerSet {
+ sMarkers := m.structMarkers[sTyp]
+
+ return NewMarkerSet(sMarkers.UnsortedList()...)
+}
+
+// TypeMarkers returns the appropriate MarkerSet for the type,
+// or an empty MarkerSet if the appropriate MarkerSet isn't found.
+func (m *markers) TypeMarkers(typ *ast.TypeSpec) MarkerSet {
+ tMarkers := m.typeMarkers[typ]
+
+ return NewMarkerSet(tMarkers.UnsortedList()...)
+}
+
+func (m *markers) insertFieldMarkers(field *ast.Field, ms MarkerSet) {
+ m.fieldMarkers[field] = ms
+}
+
+func (m *markers) insertStructMarkers(sTyp *ast.StructType, ms MarkerSet) {
+ m.structMarkers[sTyp] = ms
+}
+
+func (m *markers) insertTypeMarkers(typ *ast.TypeSpec, ms MarkerSet) {
+ m.typeMarkers[typ] = ms
+}
+
+// Analyzer is the analyzer for the markers package.
+// It iterates over declarations within a package and parses the comments to extract markers.
+var Analyzer = &analysis.Analyzer{
+ Name: "markers",
+ Doc: "Iterates over declarations within a package and parses the comments to extract markers",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ ResultType: reflect.TypeOf(newMarkers()),
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ nodeFilter := []ast.Node{
+ // In order to get the godoc comments from a type
+ // definition as such:
+ //
+ // // comment
+ // type Foo struct {...}
+ //
+ // We need to use the ast.GenDecl type instead of the
+ // ast.TypeSpec type. The ast.TypeSpec.Doc field will only
+ // be populated if types are defined as such:
+ //
+ // type(
+ // // comment
+ // Foo struct {...}
+ // )
+ //
+ // For more information, see https://github.com/golang/go/issues/27477
+ (*ast.GenDecl)(nil),
+ (*ast.Field)(nil),
+ }
+
+ results, ok := newMarkers().(*markers)
+ if !ok {
+ return nil, errCouldNotCreateMarkers
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ switch typ := n.(type) {
+ case *ast.GenDecl:
+ extractGenDeclMarkers(typ, results)
+ case *ast.Field:
+ extractFieldMarkers(typ, results)
+ }
+ })
+
+ return results, nil
+}
+
+func extractGenDeclMarkers(typ *ast.GenDecl, results *markers) {
+ declMarkers := NewMarkerSet()
+
+ if typ.Doc != nil {
+ for _, comment := range typ.Doc.List {
+ if marker := extractMarker(comment); marker.Identifier != "" {
+ declMarkers.Insert(marker)
+ }
+ }
+ }
+
+ if len(typ.Specs) == 0 {
+ return
+ }
+
+ tSpec, ok := typ.Specs[0].(*ast.TypeSpec)
+ if !ok {
+ return
+ }
+
+ results.insertTypeMarkers(tSpec, declMarkers)
+
+ if sTyp, ok := tSpec.Type.(*ast.StructType); ok {
+ results.insertStructMarkers(sTyp, declMarkers)
+ }
+}
+
+func extractFieldMarkers(field *ast.Field, results *markers) {
+ if field == nil || field.Doc == nil {
+ return
+ }
+
+ fieldMarkers := NewMarkerSet()
+
+ for _, comment := range field.Doc.List {
+ if marker := extractMarker(comment); marker.Identifier != "" {
+ fieldMarkers.Insert(marker)
+ }
+ }
+
+ results.insertFieldMarkers(field, fieldMarkers)
+}
+
+func extractMarker(comment *ast.Comment) Marker {
+ if !strings.HasPrefix(comment.Text, "// +") {
+ return Marker{}
+ }
+
+ markerContent := strings.TrimPrefix(comment.Text, "// +")
+ id, expressions := extractMarkerIDAndExpressions(DefaultRegistry(), markerContent)
+
+ return Marker{
+ Identifier: id,
+ Expressions: expressions,
+ RawComment: comment.Text,
+ Pos: comment.Pos(),
+ End: comment.End(),
+ }
+}
+
+func extractMarkerIDAndExpressions(knownMarkers Registry, marker string) (string, map[string]string) {
+ if id, ok := knownMarkers.Match(marker); ok {
+ return extractKnownMarkerIDAndExpressions(id, marker)
+ }
+
+ return extractUnknownMarkerIDAndExpressions(marker)
+}
+
+func extractKnownMarkerIDAndExpressions(id string, marker string) (string, map[string]string) {
+ return id, extractExpressions(strings.TrimPrefix(marker, id))
+}
+
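+// For example (illustrative):
+//
+//	extractExpressions(":rule='self.x > 0',message='must be positive'")
+//
+// returns map[string]string{"rule": "'self.x > 0'", "message": "'must be positive'"}.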
+func extractExpressions(expressions string) map[string]string {
+ expressionsMap := map[string]string{}
+
+ // Do some normalization work to ensure we can parse expressions in
+ // a standard way. Trim any lingering colons (:) and replace all ':='s with '='
+ expressions = strings.TrimPrefix(expressions, ":")
+ expressions = strings.ReplaceAll(expressions, ":=", "=")
+
+ // split expression string on commas (,) to handle multiple expressions
+ // in a single marker
+ chainedExpressions := strings.Split(expressions, ",")
+ for _, chainedExpression := range chainedExpressions {
+ exps := strings.SplitN(chainedExpression, "=", 2)
+ if len(exps) < 2 {
+ continue
+ }
+
+ expressionsMap[exps[0]] = exps[1]
+ }
+
+ return expressionsMap
+}
+
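+// For example (illustrative):
+//
+//	extractUnknownMarkerIDAndExpressions("kubebuilder:object:root:=true")
+//
+// returns ("kubebuilder:object:root", map[string]string{UnnamedExpression: "true"}).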
+func extractUnknownMarkerIDAndExpressions(marker string) (string, map[string]string) {
+ // if there is only a single "=" split on the equal sign and trim any
+ // dangling ":" characters.
+ if strings.Count(marker, "=") == 1 {
+ splits := strings.Split(marker, "=")
+ // Trim any dangling ":" characters on the identifier to handle
+ // cases like +kubebuilder:object:root:=true
+ identifier := strings.TrimSuffix(splits[0], ":")
+
+ // If there is a single "=" sign that means the left side of the
+ // marker is the identifier and there is no real expression identifier.
+ expressions := map[string]string{UnnamedExpression: splits[1]}
+
+ return identifier, expressions
+ }
+
+ // split on :
+ separators := strings.Split(marker, ":")
+
+ identifier := ""
+ expressionString := ""
+
+ for _, item := range separators {
+ // Not an expression
+ if strings.Count(item, "=") == 0 {
+ if identifier == "" {
+ identifier = item
+
+ continue
+ }
+
+ identifier = strings.Join([]string{identifier, item}, ":")
+
+ continue
+ }
+
+ // The item is likely an expression, join it with any existing expression string.
+ // While something like 'foo:bar=baz:value=something' isn't a valid marker based on our
+ // current understanding, this logic should ensure we are joining expressions appropriately
+ // in a scenario like this.
+ if expressionString == "" {
+ expressionString = item
+ continue
+ }
+
+ expressionString = strings.Join([]string{expressionString, item}, ",")
+ }
+
+ expressions := extractExpressions(expressionString)
+
+ return identifier, expressions
+}
+
+// Marker represents a marker extracted from a comment on a declaration.
+type Marker struct {
+ // Identifier is the value of the marker once the leading comment, '+', and expressions are trimmed.
+ Identifier string
+
+ // Expressions are the set of expressions that have been specified for the marker
+ Expressions map[string]string
+
+ // RawComment is the raw comment line, unfiltered.
+ RawComment string
+
+ // Pos is the starting position in the file for the comment line containing the marker.
+ Pos token.Pos
+
+	// End is the ending position in the file for the comment line containing the marker.
+ End token.Pos
+}
+
+// MarkerSet is a set implementation for Markers that uses
+// the Marker identifier as the key, but returns all full Markers
+// with that identifier as the result.
+type MarkerSet map[string][]Marker
+
+// NewMarkerSet initialises a new MarkerSet with the provided values.
+// If any markers have the same identifier, they will all be added to
+// the list of markers for that identifier; no duplication checks are implemented.
+func NewMarkerSet(markers ...Marker) MarkerSet {
+ ms := make(MarkerSet)
+
+ ms.Insert(markers...)
+
+ return ms
+}
+
+// Insert adds the given markers to the MarkerSet.
+// Markers that share an identifier are appended to the list for that
+// identifier; no duplication checks are implemented.
+func (ms MarkerSet) Insert(markers ...Marker) {
+ for _, marker := range markers {
+ ms[marker.Identifier] = append(ms[marker.Identifier], marker)
+ }
+}
+
+// Has returns whether marker(s) with the given identifier are present in the
+// MarkerSet. If Has returns true, there is at least one marker
+// with this identifier.
+func (ms MarkerSet) Has(identifier string) bool {
+ _, ok := ms[identifier]
+ return ok
+}
+
+// HasWithValue returns whether marker(s) with the given identifier and
+// expression values (e.g. "kubebuilder:object:root:=true") are present
+// in the MarkerSet.
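+// For example (illustrative), ms.HasWithValue("kubebuilder:object:root:=true") is true
+// when a "+kubebuilder:object:root:=true" marker was extracted for the field or type.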
+func (ms MarkerSet) HasWithValue(marker string) bool {
+ return ms.HasWithExpressions(extractMarkerIDAndExpressions(DefaultRegistry(), marker))
+}
+
+// HasWithExpressions returns whether marker(s) with the identifier and
+// expressions are present in the MarkerSet.
+func (ms MarkerSet) HasWithExpressions(identifier string, expressions map[string]string) bool {
+ markers, ok := ms[identifier]
+ if !ok {
+ return false
+ }
+
+ for _, marker := range markers {
+ if reflect.DeepEqual(marker.Expressions, expressions) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// UnsortedList returns a list of the markers, in no particular order.
+func (ms MarkerSet) UnsortedList() []Marker {
+ markers := []Marker{}
+
+ for _, marker := range ms {
+ markers = append(markers, marker...)
+ }
+
+ return markers
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/doc.go
new file mode 100644
index 0000000000..bb10107469
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/doc.go
@@ -0,0 +1,53 @@
+/*
+markers is a helper used to extract marker information from types.
+A marker is a comment line preceded with `+` that indicates to a generator something about the field or type.
+
+The package returns a [Markers] interface, which can be used to access markers associated with a struct or a field within a struct.
+
+Example:
+
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ markersAccess := pass.ResultOf[markers.Analyzer].(markers.Markers)
+
+ // Filter to structs so that we can iterate over fields in a struct.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ sTyp, ok := n.(*ast.StructType)
+ if !ok {
+ return
+ }
+
+ if sTyp.Fields == nil {
+ return
+ }
+
+ for _, field := range sTyp.Fields.List {
+ if field == nil || len(field.Names) == 0 {
+ continue
+ }
+
+ structMarkers := markersAccess.StructMarkers(sTyp)
+ fieldMarkers := markersAccess.FieldMarkers(field)
+
+ ...
+ }
+ })
+
+The result of StructMarkers or FieldMarkers is a [MarkerSet] which can be used to determine the presence of a marker, and the value of the marker.
+The MarkerSet is indexed by the marker identifier, once the leading `+` prefix and any expressions are removed.
+
+Additional information about the marker can be found in the [Marker] struct, for each marker on the field.
+
+Example:
+
+ fieldMarkers := markersAccess.FieldMarkers(field)
+
+ if fieldMarkers.Has("required") {
+ requiredMarker := fieldMarkers["required"]
+ ...
+ }
+*/
+package markers
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/registry.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/registry.go
new file mode 100644
index 0000000000..615f2e2f41
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/registry.go
@@ -0,0 +1,89 @@
+package markers
+
+import (
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Registry is a thread-safe set of known marker identifiers.
+type Registry interface {
+ // Register adds the provided identifiers to the Registry.
+ Register(ids ...string)
+
+ // Match performs a greedy match to determine if an input
+ // marker string matches a known marker identifier. It returns
+ // the matched identifier and a boolean representing if a match was
+ // found. If no match is found, the returned identifier will be an
+ // empty string.
+ Match(in string) (string, bool)
+}
+
+var defaultRegistry = NewRegistry() //nolint:gochecknoglobals
+
+// DefaultRegistry is a global registry for known markers.
+// New linters should register the markers they care about during
+// an init() function.
+func DefaultRegistry() Registry {
+ return defaultRegistry
+}
+
+// registry is the default thread-safe implementation of the Registry interface.
+type registry struct {
+ identifiers sets.Set[string]
+ mu sync.Mutex
+}
+
+// NewRegistry creates a new Registry.
+func NewRegistry() Registry {
+ return ®istry{
+ identifiers: sets.New[string](),
+ mu: sync.Mutex{},
+ }
+}
+
+// Register adds the provided identifiers to the Registry.
+func (r *registry) Register(ids ...string) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.identifiers.Insert(ids...)
+}
+
+// Match performs a greedy match to determine if an input
+// marker string matches a known marker identifier. It returns
+// the matched identifier and a boolean representing if a match was
+// found. If no match is found, the returned identifier will be an
+// empty string.
+func (r *registry) Match(in string) (string, bool) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ // If there is an exact match, return early.
+ // This is likely when using markers with no expressions like
+ // optional, required, kubebuilder:validation:Required, etc.
+ if ok := r.identifiers.Has(in); ok {
+ return in, true
+ }
+
+ // Look for the longest matching known identifier
+ bestMatch := ""
+ foundMatch := false
+
+ for _, id := range r.identifiers.UnsortedList() {
+ if strings.HasPrefix(in, id) {
+ if len(bestMatch) < len(id) {
+ bestMatch = id
+ foundMatch = true
+ }
+ }
+ }
+
+ return bestMatch, foundMatch
+}
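+
+// Example (illustrative): if both "kubebuilder:validation" and
+// "kubebuilder:validation:MaxLength" are registered, then
+// Match("kubebuilder:validation:MaxLength=10") returns
+// ("kubebuilder:validation:MaxLength", true), as the longest matching
+// identifier wins.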
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/analyzer.go
new file mode 100644
index 0000000000..000052cacb
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/analyzer.go
@@ -0,0 +1,64 @@
+package integers
+
+import (
+ "errors"
+ "go/ast"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/utils"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const name = "integers"
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+)
+
+// Analyzer is the analyzer for the integers package.
+// It checks that no struct fields or type aliases are `int`, or unsigned integers.
+var Analyzer = &analysis.Analyzer{
+ Name: name,
+	Doc:      "All integers should be explicit about their size; int32 and int64 should be used over plain int. Unsigned ints are not allowed.",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+	// Filter to structs so that we can look at fields within structs.
+	// Filter to typespecs so that we can look at type aliases.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ (*ast.TypeSpec)(nil),
+ }
+
+ typeChecker := utils.NewTypeChecker(checkIntegers)
+
+ // Preorder visits all the nodes of the AST in depth-first order. It calls
+ // f(n) for each node n before it visits n's children.
+ //
+ // We use the filter defined above, ensuring we only look at struct fields and type declarations.
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ typeChecker.CheckNode(pass, n)
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+// checkIntegers looks for known integer types that do not match the allowed `int32` or `int64` requirements.
+func checkIntegers(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) {
+ switch ident.Name {
+ case "int32", "int64":
+ // Valid cases
+ case "int", "int8", "int16":
+ pass.Reportf(node.Pos(), "%s should not use an int, int8 or int16. Use int32 or int64 depending on bounding requirements", prefix)
+ case "uint", "uint8", "uint16", "uint32", "uint64":
+ pass.Reportf(node.Pos(), "%s should not use unsigned integers, use only int32 or int64 and apply validation to ensure the value is positive", prefix)
+ }
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/doc.go
new file mode 100644
index 0000000000..2cbe3691ff
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/doc.go
@@ -0,0 +1,24 @@
+/*
+integers is an analyzer that checks for usage of unsupported integer types.
+
+According to the API conventions, only int32 and int64 types should be used in Kubernetes APIs.
+
+int32 is preferred and should be used in most cases, unless the use case requires representing
+values larger than int32 can hold.
+
+It also states that unsigned integers should be replaced with signed integers, with numeric
+lower bounds added to prevent negative values.
+
+Succinctly, this analyzer checks for int, int8, int16, uint, uint8, uint16, uint32 and uint64 types
+and highlights that they should not be used.
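+
+For example (a sketch):
+
+	// Bad: platform-dependent width.
+	Replicas int `json:"replicas"`
+
+	// Good: explicitly sized and signed, with a lower bound instead of an unsigned type.
+	// +kubebuilder:validation:Minimum=0
+	Replicas int32 `json:"replicas"`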
+*/
+package integers
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/initializer.go
new file mode 100644
index 0000000000..dc41ec663e
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/initializer.go
@@ -0,0 +1,30 @@
+package integers
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/analyzer.go
new file mode 100644
index 0000000000..156f945a7d
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/analyzer.go
@@ -0,0 +1,122 @@
+package jsontags
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "regexp"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+ "github.com/JoelSpeed/kal/pkg/config"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ // camelCaseRegex is a regular expression that matches camel case strings.
+ camelCaseRegex = "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$"
+
+ name = "jsontags"
+)
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetJSONTags = errors.New("could not get json tags")
+)
+
+type analyzer struct {
+ jsonTagRegex *regexp.Regexp
+}
+
+// newAnalyzer creates a new analyzer with the given json tag regex.
+func newAnalyzer(cfg config.JSONTagsConfig) (*analysis.Analyzer, error) {
+ defaultConfig(&cfg)
+
+ jsonTagRegex, err := regexp.Compile(cfg.JSONTagRegex)
+ if err != nil {
+ return nil, fmt.Errorf("could not compile json tag regex: %w", err)
+ }
+
+ a := &analyzer{
+ jsonTagRegex: jsonTagRegex,
+ }
+
+ return &analysis.Analyzer{
+ Name: name,
+ Doc: "Check that all struct fields in an API are tagged with json tags",
+ Run: a.run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, extractjsontags.Analyzer},
+ }, nil
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+ if !ok {
+ return nil, errCouldNotGetJSONTags
+ }
+
+	// Filter to structs so that we can iterate over the fields in each struct.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ sTyp, ok := n.(*ast.StructType)
+ if !ok {
+ return
+ }
+
+ if sTyp.Fields == nil {
+ return
+ }
+
+ for _, field := range sTyp.Fields.List {
+ a.checkField(pass, field, jsonTags)
+ }
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+func (a *analyzer) checkField(pass *analysis.Pass, field *ast.Field, jsonTags extractjsontags.StructFieldTags) {
+ tagInfo := jsonTags.FieldTags(field)
+
+ var prefix string
+ if len(field.Names) > 0 && field.Names[0] != nil {
+ prefix = fmt.Sprintf("field %s", field.Names[0].Name)
+ } else if ident, ok := field.Type.(*ast.Ident); ok {
+ prefix = fmt.Sprintf("embedded field %s", ident.Name)
+ }
+
+ if tagInfo.Missing {
+ pass.Reportf(field.Pos(), "%s is missing json tag", prefix)
+ return
+ }
+
+ if tagInfo.Inline {
+ return
+ }
+
+ if tagInfo.Name == "" {
+ pass.Reportf(field.Pos(), "%s has empty json tag", prefix)
+ return
+ }
+
+ matched := a.jsonTagRegex.Match([]byte(tagInfo.Name))
+ if !matched {
+ pass.Reportf(field.Pos(), "%s json tag does not match pattern %q: %s", prefix, a.jsonTagRegex.String(), tagInfo.Name)
+ }
+}
+
+func defaultConfig(cfg *config.JSONTagsConfig) {
+ if cfg.JSONTagRegex == "" {
+ cfg.JSONTagRegex = camelCaseRegex
+ }
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/doc.go
new file mode 100644
index 0000000000..1e3ad31cd2
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/doc.go
@@ -0,0 +1,21 @@
+/*
+jsontags provides a linter to ensure that JSON tags are present on struct fields, and that they match a given regex.
+
+Kubernetes API types should have JSON tags on all fields, to ensure that the fields are correctly serialized and deserialized.
+The JSON tags should be camelCase, with a lower case first letter, and should match the field name in all but capitalization.
+There should be no hyphens or underscores in either the field name or the JSON tag.
+
+The linter can be configured with a regex to match the JSON tags against.
+By default, the regex is `^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$`, which allows camelCase JSON tags with consecutive capital letters,
+permitting, for example, fields like `requestTTLSeconds`.
+
+To disallow consecutive capital letters, the regex can be set to `^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]+)*$`.
+The regex can be configured with the JSONTagRegex field in the JSONTagsConfig struct.
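+
+A minimal sketch of that configuration (the config types appear elsewhere in this vendor tree):
+
+	cfg := config.JSONTagsConfig{
+		JSONTagRegex: `^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]+)*$`,
+	}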
+*/
+package jsontags
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/initializer.go
new file mode 100644
index 0000000000..75844302a8
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/initializer.go
@@ -0,0 +1,30 @@
+package jsontags
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return newAnalyzer(cfg.JSONTags)
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/analyzer.go
new file mode 100644
index 0000000000..dc967f87e1
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/analyzer.go
@@ -0,0 +1,225 @@
+package maxlength
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ name = "maxlength"
+
+ kubebuilderMaxLength = "kubebuilder:validation:MaxLength"
+ kubebuilderEnum = "kubebuilder:validation:Enum"
+ kubebuilderFormat = "kubebuilder:validation:Format"
+
+ kubebuilderItemsMaxLength = "kubebuilder:validation:items:MaxLength"
+ kubebuilderItemsEnum = "kubebuilder:validation:items:Enum"
+ kubebuilderItemsFormat = "kubebuilder:validation:items:Format"
+
+ kubebuilderMaxItems = "kubebuilder:validation:MaxItems"
+)
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetMarkers = errors.New("could not get markers")
+)
+
+// Analyzer is the analyzer for the maxlength package.
+// It checks that strings and arrays have maximum lengths and maximum items respectively.
+var Analyzer = &analysis.Analyzer{
+ Name: name,
+	Doc:      "Checks that all string fields are marked with a maximum length, and that arrays are marked with max items.",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+ if !ok {
+ return nil, errCouldNotGetMarkers
+ }
+
+ // Filter to structs so that we can iterate over the fields in a struct.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ sTyp, ok := n.(*ast.StructType)
+ if !ok {
+ return
+ }
+
+ if sTyp.Fields == nil {
+ return
+ }
+
+ for _, field := range sTyp.Fields.List {
+ checkField(pass, field, markersAccess)
+ }
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+func checkField(pass *analysis.Pass, field *ast.Field, markersAccess markers.Markers) {
+ if len(field.Names) == 0 || field.Names[0] == nil {
+ return
+ }
+
+ fieldName := field.Names[0].Name
+ prefix := fmt.Sprintf("field %s", fieldName)
+
+ checkTypeExpr(pass, field.Type, field, nil, markersAccess, prefix, kubebuilderMaxLength, needsStringMaxLength)
+}
+
+func checkIdent(pass *analysis.Pass, ident *ast.Ident, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+ if ident.Obj == nil { // Built-in type
+ checkString(pass, ident, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+
+ return
+ }
+
+ tSpec, ok := ident.Obj.Decl.(*ast.TypeSpec)
+ if !ok {
+ return
+ }
+
+ checkTypeSpec(pass, tSpec, node, append(aliases, tSpec), markersAccess, fmt.Sprintf("%s type", prefix), marker, needsMaxLength)
+}
+
+func checkString(pass *analysis.Pass, ident *ast.Ident, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+ if ident.Name != "string" {
+ return
+ }
+
+ markers := getCombinedMarkers(markersAccess, node, aliases)
+
+ if needsMaxLength(markers) {
+ pass.Reportf(node.Pos(), "%s must have a maximum length, add %s marker", prefix, marker)
+ }
+}
+
+func checkTypeSpec(pass *analysis.Pass, tSpec *ast.TypeSpec, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+ if tSpec.Name == nil {
+ return
+ }
+
+ typeName := tSpec.Name.Name
+ prefix = fmt.Sprintf("%s %s", prefix, typeName)
+
+ checkTypeExpr(pass, tSpec.Type, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+}
+
+func checkTypeExpr(pass *analysis.Pass, typeExpr ast.Expr, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+ switch typ := typeExpr.(type) {
+ case *ast.Ident:
+ checkIdent(pass, typ, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+ case *ast.StarExpr:
+ checkTypeExpr(pass, typ.X, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+ case *ast.ArrayType:
+ checkArrayType(pass, typ, node, aliases, markersAccess, prefix)
+ }
+}
+
+func checkArrayType(pass *analysis.Pass, arrayType *ast.ArrayType, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix string) {
+ if arrayType.Elt != nil {
+ if ident, ok := arrayType.Elt.(*ast.Ident); ok {
+ checkArrayElementIdent(pass, ident, node, aliases, markersAccess, fmt.Sprintf("%s array element", prefix))
+ }
+ }
+
+ markers := getCombinedMarkers(markersAccess, node, aliases)
+
+ if !markers.Has(kubebuilderMaxItems) {
+		pass.Reportf(node.Pos(), "%s must have a maximum number of items, add %s marker", prefix, kubebuilderMaxItems)
+ }
+}
+
+func checkArrayElementIdent(pass *analysis.Pass, ident *ast.Ident, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix string) {
+ if ident.Obj == nil { // Built-in type
+ checkString(pass, ident, node, aliases, markersAccess, prefix, kubebuilderItemsMaxLength, needsItemsMaxLength)
+
+ return
+ }
+
+ tSpec, ok := ident.Obj.Decl.(*ast.TypeSpec)
+ if !ok {
+ return
+ }
+
+	// If the array element wasn't directly a string, allow a string alias to be used
+	// with either the items-style markers or the markers on the alias itself.
+ checkTypeSpec(pass, tSpec, node, append(aliases, tSpec), markersAccess, fmt.Sprintf("%s type", prefix), kubebuilderMaxLength, func(ms markers.MarkerSet) bool {
+ return needsStringMaxLength(ms) && needsItemsMaxLength(ms)
+ })
+}
+
+func getCombinedMarkers(markersAccess markers.Markers, node ast.Node, aliases []*ast.TypeSpec) markers.MarkerSet {
+ base := markers.NewMarkerSet(getMarkers(markersAccess, node).UnsortedList()...)
+
+ for _, a := range aliases {
+ base.Insert(getMarkers(markersAccess, a).UnsortedList()...)
+ }
+
+ return base
+}
+
+func getMarkers(markersAccess markers.Markers, node ast.Node) markers.MarkerSet {
+ switch t := node.(type) {
+ case *ast.Field:
+ return markersAccess.FieldMarkers(t)
+ case *ast.TypeSpec:
+ return markersAccess.TypeMarkers(t)
+ }
+
+ return nil
+}
+
+// needsStringMaxLength returns true if the field needs a maximum length.
+// Fields do not need a maximum length if they are already marked with a maximum length,
+// or if they are an enum, or if they are a date, date-time or duration.
+func needsStringMaxLength(markerSet markers.MarkerSet) bool {
+ switch {
+ case markerSet.Has(kubebuilderMaxLength),
+ markerSet.Has(kubebuilderEnum),
+ markerSet.HasWithValue(kubebuilderFormatWithValue("date")),
+ markerSet.HasWithValue(kubebuilderFormatWithValue("date-time")),
+ markerSet.HasWithValue(kubebuilderFormatWithValue("duration")):
+ return false
+ }
+
+ return true
+}
+
+func needsItemsMaxLength(markerSet markers.MarkerSet) bool {
+ switch {
+ case markerSet.Has(kubebuilderItemsMaxLength),
+ markerSet.Has(kubebuilderItemsEnum),
+ markerSet.HasWithValue(kubebuilderItemsFormatWithValue("date")),
+ markerSet.HasWithValue(kubebuilderItemsFormatWithValue("date-time")),
+ markerSet.HasWithValue(kubebuilderItemsFormatWithValue("duration")):
+ return false
+ }
+
+ return true
+}
+
+func kubebuilderFormatWithValue(value string) string {
+ return fmt.Sprintf("%s:=%s", kubebuilderFormat, value)
+}
+
+func kubebuilderItemsFormatWithValue(value string) string {
+ return fmt.Sprintf("%s:=%s", kubebuilderItemsFormat, value)
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/doc.go
new file mode 100644
index 0000000000..25cf68de9a
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/doc.go
@@ -0,0 +1,26 @@
+/*
+maxlength is an analyzer that checks that all string fields have a maximum length, and that all array fields have a maximum number of items.
+
+String fields that are not otherwise bound in length, through being an enum or formatted in a certain way, should have a maximum length.
+This ensures that CEL validations on the field are not overly costly in terms of time and memory.
+
+Array fields should have a maximum number of items.
+This ensures that any CEL validations on the field are not overly costly in terms of time and memory.
+Where arrays are used to represent a list of structures, CEL rules may exist within the array.
+Limiting the array length ensures the cardinality of the rules within the array is not unbounded.
+
+For strings, the maximum length can be set using the `kubebuilder:validation:MaxLength` tag.
+For arrays, the maximum number of items can be set using the `kubebuilder:validation:MaxItems` tag.
+
+For arrays of strings, the maximum length of each string can be set using the
+`kubebuilder:validation:items:MaxLength` tag on the array field itself.
+Alternatively, if the array uses a string type alias, the `kubebuilder:validation:MaxLength` tag can be used on the alias.
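+
+For example (a sketch):
+
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:items:MaxLength=256
+	// +optional
+	Tags []string `json:"tags,omitempty"`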
+*/
+package maxlength
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/initializer.go
new file mode 100644
index 0000000000..53ff89ac81
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/initializer.go
@@ -0,0 +1,30 @@
+package maxlength
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return false // For now, CRD only, and so not on by default.
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/analyzer.go
new file mode 100644
index 0000000000..afdc928f3f
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/analyzer.go
@@ -0,0 +1,58 @@
+package nobools
+
+import (
+ "errors"
+ "go/ast"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/utils"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const name = "nobools"
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+)
+
+// Analyzer is the analyzer for the nobools package.
+// It checks that no struct fields are `bool`.
+var Analyzer = &analysis.Analyzer{
+ Name: name,
+	Doc:      "Boolean values cannot evolve over time; use an enum with meaningful values instead",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ // Filter to structs so that we can look at fields within structs.
+ // Filter to typespecs so that we can look at type aliases.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ (*ast.TypeSpec)(nil),
+ }
+
+ typeChecker := utils.NewTypeChecker(checkBool)
+
+ // Preorder visits all the nodes of the AST in depth-first order. It calls
+ // f(n) for each node n before it visits n's children.
+ //
+ // We use the filter defined above, ensuring we only look at struct fields and type declarations.
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ typeChecker.CheckNode(pass, n)
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+func checkBool(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) {
+ if ident.Name == "bool" {
+ pass.Reportf(node.Pos(), "%s should not use a bool. Use a string type with meaningful constant values as an enum.", prefix)
+ }
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/doc.go
new file mode 100644
index 0000000000..2165e6a424
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/doc.go
@@ -0,0 +1,15 @@
+/*
+nobools is an analyzer that checks for usage of bool types.
+
+Boolean values can only ever have 2 states, true or false.
+Over time, needs may change, and with a bool type, there is no way to add additional states.
+This problem then leads to pairs of bools, where values of one are only valid given the value of another.
+This is confusing and error-prone.
+
+It is recommended instead to use a string type with a set of constants to represent the different states,
+creating an enum.
+
+By using an enum, not only can you provide meaningful names for the various states of the API,
+but you can also add additional states in the future without breaking the API.
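+
+A hypothetical sketch, using an enumerated string type in place of a bool:
+
+	// WidgetMode is an illustrative enumerated type with room to grow.
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	type WidgetMode string
+
+	const (
+		WidgetModeEnabled  WidgetMode = "Enabled"
+		WidgetModeDisabled WidgetMode = "Disabled"
+	)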
+*/
+package nobools
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/initializer.go
new file mode 100644
index 0000000000..e12ac15893
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/initializer.go
@@ -0,0 +1,32 @@
+package nobools
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ // Bools avoidance in the Kube conventions is not a must.
+ // Make this opt in depending on the projects own preference.
+ return false
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/analyzer.go
new file mode 100644
index 0000000000..bdec59f1c4
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/analyzer.go
@@ -0,0 +1,85 @@
+package nophase
+
+import (
+ "errors"
+ "go/ast"
+ "strings"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const name = "nophase"
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetJSONTags = errors.New("could not get json tags")
+)
+
+// Analyzer is the analyzer for the nophase package.
+// It checks that no struct fields are named 'phase', or contain 'phase'
+// as a substring.
+var Analyzer = &analysis.Analyzer{
+ Name: name,
+ Doc: "phase fields are deprecated and conditions should be preferred, avoid phase like enum fields",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, extractjsontags.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+ if !ok {
+ return nil, errCouldNotGetJSONTags
+ }
+
+ // Filter to fields so that we can iterate over fields in a struct.
+ nodeFilter := []ast.Node{
+ (*ast.Field)(nil),
+ }
+
+ // Preorder visits all the nodes of the AST in depth-first order. It calls
+ // f(n) for each node n before it visits n's children.
+ //
+ // We use the filter defined above, ensuring we only look at struct fields.
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ field, ok := n.(*ast.Field)
+ if !ok {
+ return
+ }
+
+ if field == nil || len(field.Names) == 0 {
+ return
+ }
+
+ fieldName := field.Names[0].Name
+
+ // First check if the struct field name contains 'phase'
+ if strings.Contains(strings.ToLower(fieldName), "phase") {
+ pass.Reportf(field.Pos(),
+ "field %s: phase fields are deprecated and conditions should be preferred, avoid phase like enum fields",
+ fieldName,
+ )
+
+ return
+ }
+
+ // Then check if the json serialization of the field contains 'phase'
+ tagInfo := jsonTags.FieldTags(field)
+
+ if strings.Contains(strings.ToLower(tagInfo.Name), "phase") {
+ pass.Reportf(field.Pos(),
+ "field %s: phase fields are deprecated and conditions should be preferred, avoid phase like enum fields",
+ fieldName,
+ )
+ }
+ })
+
+ return nil, nil //nolint:nilnil
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/doc.go
new file mode 100644
index 0000000000..edeb3f6f2e
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/doc.go
@@ -0,0 +1,11 @@
+/*
+nophase provides a linter to ensure that structs do not contain a Phase field.
+
+Phase fields are deprecated and conditions should be preferred. Avoid phase like enum
+fields.
+
+The linter will flag any struct field containing the substring 'phase'. This means both
+Phase and FooPhase will be flagged.
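+
+A hypothetical sketch of the preferred conditions-based pattern:
+
+	// conditions represent the current observations of the object's state.
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`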
+*/
+package nophase
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/initializer.go
new file mode 100644
index 0000000000..0554aecb59
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/initializer.go
@@ -0,0 +1,30 @@
+package nophase
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/analyzer.go
new file mode 100644
index 0000000000..74b866eb29
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/analyzer.go
@@ -0,0 +1,243 @@
+package optionalorrequired
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ name = "optionalorrequired"
+
+ // OptionalMarker is the marker that indicates that a field is optional.
+ OptionalMarker = "optional"
+
+ // RequiredMarker is the marker that indicates that a field is required.
+ RequiredMarker = "required"
+
+ // KubebuilderOptionalMarker is the marker that indicates that a field is optional in kubebuilder.
+ KubebuilderOptionalMarker = "kubebuilder:validation:Optional"
+
+ // KubebuilderRequiredMarker is the marker that indicates that a field is required in kubebuilder.
+ KubebuilderRequiredMarker = "kubebuilder:validation:Required"
+)
+
+func init() {
+ markers.DefaultRegistry().Register(
+ OptionalMarker,
+ RequiredMarker,
+ KubebuilderOptionalMarker,
+ KubebuilderRequiredMarker,
+ )
+}
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetMarkers = errors.New("could not get markers")
+ errCouldNotGetJSONTags = errors.New("could not get jsontags")
+)
+
+type analyzer struct {
+ primaryOptionalMarker string
+ secondaryOptionalMarker string
+
+ primaryRequiredMarker string
+ secondaryRequiredMarker string
+}
+
+// newAnalyzer creates a new analyzer with the given configuration.
+func newAnalyzer(cfg config.OptionalOrRequiredConfig) *analysis.Analyzer {
+ defaultConfig(&cfg)
+
+ a := &analyzer{}
+
+ switch cfg.PreferredOptionalMarker {
+ case OptionalMarker:
+ a.primaryOptionalMarker = OptionalMarker
+ a.secondaryOptionalMarker = KubebuilderOptionalMarker
+ case KubebuilderOptionalMarker:
+ a.primaryOptionalMarker = KubebuilderOptionalMarker
+ a.secondaryOptionalMarker = OptionalMarker
+ }
+
+ switch cfg.PreferredRequiredMarker {
+ case RequiredMarker:
+ a.primaryRequiredMarker = RequiredMarker
+ a.secondaryRequiredMarker = KubebuilderRequiredMarker
+ case KubebuilderRequiredMarker:
+ a.primaryRequiredMarker = KubebuilderRequiredMarker
+ a.secondaryRequiredMarker = RequiredMarker
+ }
+
+ return &analysis.Analyzer{
+ Name: name,
+ Doc: "Checks that all struct fields are marked either with the optional or required markers.",
+ Run: a.run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer},
+ }
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+ if !ok {
+ return nil, errCouldNotGetMarkers
+ }
+
+ jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+ if !ok {
+ return nil, errCouldNotGetJSONTags
+ }
+
+ // Filter to fields so that we can iterate over fields in a struct.
+ nodeFilter := []ast.Node{
+ (*ast.StructType)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ sTyp, ok := n.(*ast.StructType)
+ if !ok {
+ return
+ }
+
+ if sTyp.Fields == nil {
+ return
+ }
+
+ for _, field := range sTyp.Fields.List {
+ fieldMarkers := markersAccess.FieldMarkers(field)
+ fieldTagInfo := jsonTags.FieldTags(field)
+
+ a.checkField(pass, field, fieldMarkers, fieldTagInfo)
+ }
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+//nolint:cyclop
+func (a *analyzer) checkField(pass *analysis.Pass, field *ast.Field, fieldMarkers markers.MarkerSet, fieldTagInfo extractjsontags.FieldTagInfo) {
+ if fieldTagInfo.Inline {
+ // Inline fields would have no effect if they were marked as optional/required.
+ return
+ }
+
+ var prefix string
+ if len(field.Names) > 0 && field.Names[0] != nil {
+ prefix = fmt.Sprintf("field %s", field.Names[0].Name)
+ } else if ident, ok := field.Type.(*ast.Ident); ok {
+ prefix = fmt.Sprintf("embedded field %s", ident.Name)
+ }
+
+ hasPrimaryOptional := fieldMarkers.Has(a.primaryOptionalMarker)
+ hasPrimaryRequired := fieldMarkers.Has(a.primaryRequiredMarker)
+
+ hasSecondaryOptional := fieldMarkers.Has(a.secondaryOptionalMarker)
+ hasSecondaryRequired := fieldMarkers.Has(a.secondaryRequiredMarker)
+
+ hasEitherOptional := hasPrimaryOptional || hasSecondaryOptional
+ hasEitherRequired := hasPrimaryRequired || hasSecondaryRequired
+
+ hasBothOptional := hasPrimaryOptional && hasSecondaryOptional
+ hasBothRequired := hasPrimaryRequired && hasSecondaryRequired
+
+ switch {
+ case hasEitherOptional && hasEitherRequired:
+ pass.Reportf(field.Pos(), "%s must not be marked as both optional and required", prefix)
+ case hasSecondaryOptional:
+ marker := fieldMarkers[a.secondaryOptionalMarker]
+ if hasBothOptional {
+ pass.Report(reportShouldRemoveSecondaryMarker(field, marker, a.primaryOptionalMarker, a.secondaryOptionalMarker, prefix))
+ } else {
+ pass.Report(reportShouldReplaceSecondaryMarker(field, marker, a.primaryOptionalMarker, a.secondaryOptionalMarker, prefix))
+ }
+ case hasSecondaryRequired:
+ marker := fieldMarkers[a.secondaryRequiredMarker]
+ if hasBothRequired {
+ pass.Report(reportShouldRemoveSecondaryMarker(field, marker, a.primaryRequiredMarker, a.secondaryRequiredMarker, prefix))
+ } else {
+ pass.Report(reportShouldReplaceSecondaryMarker(field, marker, a.primaryRequiredMarker, a.secondaryRequiredMarker, prefix))
+ }
+ case hasPrimaryOptional || hasPrimaryRequired:
+ // This is the correct state.
+ default:
+ pass.Reportf(field.Pos(), "%s must be marked as %s or %s", prefix, a.primaryOptionalMarker, a.primaryRequiredMarker)
+ }
+}
+
+func reportShouldReplaceSecondaryMarker(field *ast.Field, marker []markers.Marker, primaryMarker, secondaryMarker, prefix string) analysis.Diagnostic {
+ textEdits := make([]analysis.TextEdit, len(marker))
+
+ for i, m := range marker {
+ if i == 0 {
+ textEdits[i] = analysis.TextEdit{
+ Pos: m.Pos,
+ End: m.End,
+ NewText: []byte(fmt.Sprintf("// +%s", primaryMarker)),
+ }
+
+ continue
+ }
+
+ textEdits[i] = analysis.TextEdit{
+ Pos: m.Pos,
+ End: m.End + 1, // Add 1 to position to include the new line
+ NewText: nil,
+ }
+ }
+
+ return analysis.Diagnostic{
+ Pos: field.Pos(),
+ Message: fmt.Sprintf("%s should use marker %s instead of %s", prefix, primaryMarker, secondaryMarker),
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: fmt.Sprintf("should replace `%s` with `%s`", secondaryMarker, primaryMarker),
+ TextEdits: textEdits,
+ },
+ },
+ }
+}
+
+func reportShouldRemoveSecondaryMarker(field *ast.Field, marker []markers.Marker, primaryMarker, secondaryMarker, prefix string) analysis.Diagnostic {
+ textEdits := make([]analysis.TextEdit, len(marker))
+
+ for i, m := range marker {
+ textEdits[i] = analysis.TextEdit{
+ Pos: m.Pos,
+ End: m.End + 1, // Add 1 to position to include the new line
+ NewText: nil,
+ }
+ }
+
+ return analysis.Diagnostic{
+ Pos: field.Pos(),
+ Message: fmt.Sprintf("%s should use only the marker %s, %s is not required", prefix, primaryMarker, secondaryMarker),
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: fmt.Sprintf("should remove `// +%s`", secondaryMarker),
+ TextEdits: textEdits,
+ },
+ },
+ }
+}
+
+func defaultConfig(cfg *config.OptionalOrRequiredConfig) {
+ if cfg.PreferredOptionalMarker == "" {
+ cfg.PreferredOptionalMarker = OptionalMarker
+ }
+
+ if cfg.PreferredRequiredMarker == "" {
+ cfg.PreferredRequiredMarker = RequiredMarker
+ }
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/doc.go
new file mode 100644
index 0000000000..2898daf6d8
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/doc.go
@@ -0,0 +1,24 @@
+/*
+optionalorrequired is a linter to ensure that all fields are marked as either optional or required.
+By default, it searches for the `+optional` and `+required` markers, and ensures that all fields are marked
+with at least one of these markers.
+
+The linter can be configured to use different markers by setting the `PreferredOptionalMarker` and `PreferredRequiredMarker` options.
+The default values are `+optional` and `+required`, respectively.
+The valid values for each marker are:
+
+For PreferredOptionalMarker:
+ - `+optional`: The standard Kubernetes marker for optional fields.
+ - `+kubebuilder:validation:Optional`: The Kubebuilder marker for optional fields.
+
+For PreferredRequiredMarker:
+ - `+required`: The standard Kubernetes marker for required fields.
+ - `+kubebuilder:validation:Required`: The Kubebuilder marker for required fields.
+
+When a field is marked with both the Kubernetes and Kubebuilder markers, the linter will suggest removing the Kubebuilder marker.
+When a field is marked only with the Kubebuilder marker, the linter will suggest using the Kubernetes marker instead.
+This behaviour is reversed when the `PreferredOptionalMarker` and `PreferredRequiredMarker` are set to the Kubebuilder markers.
+
+Use the linter fix option to automatically apply suggested fixes.
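+
+A hypothetical sketch of fields that satisfy the default configuration:
+
+	// +optional
+	Description string `json:"description,omitempty"`
+
+	// +required
+	Name string `json:"name"`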
+*/
+package optionalorrequired
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/initializer.go
new file mode 100644
index 0000000000..6d671628c9
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/initializer.go
@@ -0,0 +1,30 @@
+package optionalorrequired
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return newAnalyzer(cfg.OptionalOrRequired), nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/registry.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/registry.go
new file mode 100644
index 0000000000..eda7daeefa
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/registry.go
@@ -0,0 +1,122 @@
+package analysis
+
+import (
+ "fmt"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/commentstart"
+ "github.com/JoelSpeed/kal/pkg/analysis/conditions"
+ "github.com/JoelSpeed/kal/pkg/analysis/integers"
+ "github.com/JoelSpeed/kal/pkg/analysis/jsontags"
+ "github.com/JoelSpeed/kal/pkg/analysis/maxlength"
+ "github.com/JoelSpeed/kal/pkg/analysis/nobools"
+ "github.com/JoelSpeed/kal/pkg/analysis/nophase"
+ "github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired"
+ "github.com/JoelSpeed/kal/pkg/analysis/requiredfields"
+ "github.com/JoelSpeed/kal/pkg/analysis/statussubresource"
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// AnalyzerInitializer is used to initialize analyzers.
+type AnalyzerInitializer interface {
+ // Name returns the name of the analyzer initialized by this initializer.
+ Name() string
+
+ // Init returns the newly initialized analyzer.
+ // It will be passed the complete LintersConfig and is expected to rely only on its own configuration.
+ Init(config.LintersConfig) (*analysis.Analyzer, error)
+
+ // Default determines whether the initializer initializes an analyzer that should be
+ // on by default, or not.
+ Default() bool
+}
+
+// Registry is used to fetch and initialize analyzers.
+type Registry interface {
+ // DefaultLinters returns the names of linters that are enabled by default.
+ DefaultLinters() sets.Set[string]
+
+ // AllLinters returns the names of all registered linters.
+ AllLinters() sets.Set[string]
+
+ // InitializeLinters returns a set of newly initialized linters based on the
+ // provided configuration.
+ InitializeLinters(config.Linters, config.LintersConfig) ([]*analysis.Analyzer, error)
+}
+
+type registry struct {
+ initializers []AnalyzerInitializer
+}
+
+// NewRegistry returns a new registry, from which analyzers can be fetched.
+func NewRegistry() Registry {
+ return &registry{
+ initializers: []AnalyzerInitializer{
+ conditions.Initializer(),
+ commentstart.Initializer(),
+ integers.Initializer(),
+ jsontags.Initializer(),
+ maxlength.Initializer(),
+ nobools.Initializer(),
+ nophase.Initializer(),
+ optionalorrequired.Initializer(),
+ requiredfields.Initializer(),
+ statussubresource.Initializer(),
+ },
+ }
+}
+
+// DefaultLinters returns the list of linters that are registered
+// as being enabled by default.
+func (r *registry) DefaultLinters() sets.Set[string] {
+ defaultLinters := sets.New[string]()
+
+ for _, initializer := range r.initializers {
+ if initializer.Default() {
+ defaultLinters.Insert(initializer.Name())
+ }
+ }
+
+ return defaultLinters
+}
+
+// AllLinters returns the list of all known linters that are known
+// to the registry.
+func (r *registry) AllLinters() sets.Set[string] {
+ linters := sets.New[string]()
+
+ for _, initializer := range r.initializers {
+ linters.Insert(initializer.Name())
+ }
+
+ return linters
+}
+
+// InitializeLinters returns a list of initialized linters based on the provided config.
+func (r *registry) InitializeLinters(cfg config.Linters, lintersCfg config.LintersConfig) ([]*analysis.Analyzer, error) {
+ analyzers := []*analysis.Analyzer{}
+ errs := []error{}
+
+ enabled := sets.New(cfg.Enable...)
+ disabled := sets.New(cfg.Disable...)
+
+ allEnabled := enabled.Len() == 1 && enabled.Has(config.Wildcard)
+ allDisabled := disabled.Len() == 1 && disabled.Has(config.Wildcard)
+
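+	// A linter is initialized when it is not explicitly disabled, and is either
+	// explicitly enabled, enabled via the wildcard, or enabled by default
+	// (defaults only apply when the wildcard disable is not set).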
+ for _, initializer := range r.initializers {
+ if !disabled.Has(initializer.Name()) && (allEnabled || enabled.Has(initializer.Name()) || !allDisabled && initializer.Default()) {
+ a, err := initializer.Init(lintersCfg)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to initialize linter %s: %w", initializer.Name(), err))
+ continue
+ }
+
+ analyzers = append(analyzers, a)
+ }
+ }
+
+ return analyzers, kerrors.NewAggregate(errs)
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/analyzer.go
new file mode 100644
index 0000000000..6aa50c3407
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/analyzer.go
@@ -0,0 +1,157 @@
+package requiredfields
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "strings"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ name = "requiredfields"
+
+ requiredMarker = "required"
+ kubebuilderRequiredMarker = "kubebuilder:validation:Required"
+)
+
+func init() {
+ markers.DefaultRegistry().Register(requiredMarker, kubebuilderRequiredMarker)
+}
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetMarkers = errors.New("could not get markers")
+ errCouldNotGetJSONTags = errors.New("could not get json tags")
+)
+
+type analyzer struct {
+ pointerPolicy config.RequiredFieldPointerPolicy
+}
+
+// newAnalyzer creates a new analyzer.
+func newAnalyzer(cfg config.RequiredFieldsConfig) *analysis.Analyzer {
+ defaultConfig(&cfg)
+
+ a := &analyzer{
+ pointerPolicy: cfg.PointerPolicy,
+ }
+
+ return &analysis.Analyzer{
+ Name: name,
+ Doc: "Checks that all required fields are not pointers, and do not have the omitempty tag.",
+ Run: a.run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer},
+ }
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+ if !ok {
+ return nil, errCouldNotGetMarkers
+ }
+
+ jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+ if !ok {
+ return nil, errCouldNotGetJSONTags
+ }
+
+ // Filter to fields so that we can iterate over fields in a struct.
+ nodeFilter := []ast.Node{
+ (*ast.Field)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ field, ok := n.(*ast.Field)
+ if !ok {
+ return
+ }
+
+ fieldMarkers := markersAccess.FieldMarkers(field)
+ fieldTagInfo := jsonTags.FieldTags(field)
+
+ a.checkField(pass, field, fieldMarkers, fieldTagInfo)
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+func (a *analyzer) checkField(pass *analysis.Pass, field *ast.Field, fieldMarkers markers.MarkerSet, fieldTagInfo extractjsontags.FieldTagInfo) {
+ if field == nil || len(field.Names) == 0 {
+ return
+ }
+
+ fieldName := field.Names[0].Name
+
+ if !fieldMarkers.Has(requiredMarker) && !fieldMarkers.Has(kubebuilderRequiredMarker) {
+ // The field is not marked required, so we don't need to check it.
+ return
+ }
+
+ if fieldTagInfo.OmitEmpty {
+ pass.Report(analysis.Diagnostic{
+ Pos: field.Pos(),
+ Message: fmt.Sprintf("field %s is marked as required, but has the omitempty tag", fieldName),
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: "should remove the omitempty tag",
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: fieldTagInfo.Pos,
+ End: fieldTagInfo.End,
+ NewText: []byte(strings.Replace(fieldTagInfo.RawValue, ",omitempty", "", 1)),
+ },
+ },
+ },
+ },
+ })
+ }
+
+ if field.Type == nil {
+ // The field has no type? We can't check if it's a pointer.
+ return
+ }
+
+ if starExpr, ok := field.Type.(*ast.StarExpr); ok {
+ var suggestedFixes []analysis.SuggestedFix
+
+ switch a.pointerPolicy {
+ case config.RequiredFieldPointerWarn:
+ // Do not suggest a fix.
+ case config.RequiredFieldPointerSuggestFix:
+ suggestedFixes = append(suggestedFixes, analysis.SuggestedFix{
+ Message: "should remove the pointer",
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: starExpr.Pos(),
+ End: starExpr.X.Pos(),
+ NewText: nil,
+ },
+ },
+ })
+ }
+
+ pass.Report(analysis.Diagnostic{
+ Pos: field.Pos(),
+ Message: fmt.Sprintf("field %s is marked as required, should not be a pointer", fieldName),
+ SuggestedFixes: suggestedFixes,
+ })
+ }
+}
+
+func defaultConfig(cfg *config.RequiredFieldsConfig) {
+ if cfg.PointerPolicy == "" {
+ cfg.PointerPolicy = config.RequiredFieldPointerSuggestFix
+ }
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/doc.go
new file mode 100644
index 0000000000..9eb82f368c
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/doc.go
@@ -0,0 +1,11 @@
+/*
+requiredfields is a linter to check that fields that are marked as required are not pointers, and do not have the omitempty tag.
+The linter will check for fields that are marked as required using the +required marker, or the +kubebuilder:validation:Required marker.
+
+The linter will suggest removing the omitempty tag from fields that are marked as required, but have the omitempty tag.
+The linter will suggest removing the pointer type from fields that are marked as required.
+
+If you have a large, existing codebase, you may not want to automatically fix all of the pointer issues.
+In this case, you can configure the linter not to suggest fixing the pointer issues by setting the `pointerPolicy` option to `Warn`.
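+
+A hypothetical configuration sketch selecting the warn-only policy, assuming the golangci-lint plugin settings format:
+
+	lintersConfig:
+	  requiredFields:
+	    pointerPolicy: Warn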
+*/
+package requiredfields
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/initializer.go
new file mode 100644
index 0000000000..30779c00ad
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/initializer.go
@@ -0,0 +1,30 @@
+package requiredfields
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return newAnalyzer(cfg.RequiredFields), nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/analyzer.go
new file mode 100644
index 0000000000..68b5e02335
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/analyzer.go
@@ -0,0 +1,151 @@
+package statussubresource
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+ "github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ name = "statussubresource"
+
+ statusJSONTag = "status"
+
+ // kubebuilderRootMarker is the marker that indicates that a struct is the object root for code and CRD generation.
+ kubebuilderRootMarker = "kubebuilder:object:root:=true"
+
+ // kubebuilderStatusSubresourceMarker is the marker that indicates that the CRD generated for a struct should include the /status subresource.
+ kubebuilderStatusSubresourceMarker = "kubebuilder:subresource:status"
+)
+
+var (
+ errCouldNotGetInspector = errors.New("could not get inspector")
+ errCouldNotGetMarkers = errors.New("could not get markers")
+ errCouldNotGetJSONTags = errors.New("could not get json tags")
+)
+
+type analyzer struct{}
+
+// newAnalyzer creates a new analyzer with the given configuration.
+func newAnalyzer() *analysis.Analyzer {
+ a := &analyzer{}
+
+ return &analysis.Analyzer{
+ Name: name,
+ Doc: "Checks that a type marked with kubebuilder:object:root:=true and containing a status field is marked with kubebuilder:subresource:status",
+ Run: a.run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer},
+ }
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+ inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, errCouldNotGetInspector
+ }
+
+ markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+ if !ok {
+ return nil, errCouldNotGetMarkers
+ }
+
+ jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+ if !ok {
+ return nil, errCouldNotGetJSONTags
+ }
+
+ // Filter to type specs so we can get the names of types
+ nodeFilter := []ast.Node{
+ (*ast.TypeSpec)(nil),
+ }
+
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ typeSpec, ok := n.(*ast.TypeSpec)
+ if !ok {
+ return
+ }
+
+ // we only care about struct types
+ sTyp, ok := typeSpec.Type.(*ast.StructType)
+ if !ok {
+ return
+ }
+
+ // no identifier on the type
+ if typeSpec.Name == nil {
+ return
+ }
+
+ structMarkers := markersAccess.StructMarkers(sTyp)
+ a.checkStruct(pass, sTyp, typeSpec.Name.Name, structMarkers, jsonTags)
+ })
+
+ return nil, nil //nolint:nilnil
+}
+
+func (a *analyzer) checkStruct(pass *analysis.Pass, sTyp *ast.StructType, name string, structMarkers markers.MarkerSet, jsonTags extractjsontags.StructFieldTags) {
+ if sTyp == nil {
+ return
+ }
+
+ if !structMarkers.HasWithValue(kubebuilderRootMarker) {
+ return
+ }
+
+ hasStatusSubresourceMarker := structMarkers.Has(kubebuilderStatusSubresourceMarker)
+ hasStatusField := hasStatusField(sTyp, jsonTags)
+
+ switch {
+ case (hasStatusSubresourceMarker && hasStatusField), (!hasStatusSubresourceMarker && !hasStatusField):
+ // acceptable state
+ case hasStatusSubresourceMarker && !hasStatusField:
+ // Might be able to have some suggested fixes here, but it is likely much more complex
+ // so for now leave it with a descriptive failure message.
+ pass.Reportf(sTyp.Pos(), "root object type %q is marked to enable the status subresource with marker %q but has no status field", name, kubebuilderStatusSubresourceMarker)
+ case !hasStatusSubresourceMarker && hasStatusField:
+ // In this case we can suggest the autofix to add the status subresource marker
+ pass.Report(analysis.Diagnostic{
+ Pos: sTyp.Pos(),
+ Message: fmt.Sprintf("root object type %q has a status field but does not have the marker %q to enable the status subresource", name, kubebuilderStatusSubresourceMarker),
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: "should add the kubebuilder:subresource:status marker",
+ TextEdits: []analysis.TextEdit{
+ // go one line above the struct and add the marker
+ {
+ // sTyp.Pos() is the beginning of the 'struct' keyword. Subtract
+ // the length of the struct name + 7 (2 for spaces surrounding type name, 4 for the 'type' keyword,
+ // and 1 for the newline) to position at the end of the line above the struct
+ // definition.
+ Pos: sTyp.Pos() - token.Pos(len(name)+7),
+ // prefix with a newline to ensure we aren't appending to a previous comment
+ NewText: []byte("\n// +kubebuilder:subresource:status"),
+ },
+ },
+ },
+ },
+ })
+ }
+}
+
+func hasStatusField(sTyp *ast.StructType, jsonTags extractjsontags.StructFieldTags) bool {
+ if sTyp == nil || sTyp.Fields == nil || sTyp.Fields.List == nil {
+ return false
+ }
+
+ for _, field := range sTyp.Fields.List {
+ info := jsonTags.FieldTags(field)
+ if info.Name == statusJSONTag {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/doc.go
new file mode 100644
index 0000000000..b601b65301
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/doc.go
@@ -0,0 +1,10 @@
+// statussubresource is a linter to check that the status subresource is configured correctly for
+// structs marked with the 'kubebuilder:object:root:=true' marker. Correct configuration means that
+// the 'kubebuilder:subresource:status' marker and a status field always appear together: any struct
+// with a status field carries the marker, and any struct with the marker has a status field.
+//
+// In the case where there is a status field present but no 'kubebuilder:subresource:status' marker, the
+// linter will suggest adding the comment '// +kubebuilder:subresource:status' above the struct.
+//
+// This linter is not enabled by default as it is only applicable to CustomResourceDefinitions.
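+//
+// A hypothetical sketch of a correctly configured root object (type names are
+// illustrative):
+//
+//	// +kubebuilder:object:root=true
+//	// +kubebuilder:subresource:status
+//	type Widget struct {
+//		Spec   WidgetSpec   `json:"spec,omitempty"`
+//		Status WidgetStatus `json:"status,omitempty"`
+//	}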
+package statussubresource
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/initializer.go
new file mode 100644
index 0000000000..6a03facdd1
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/initializer.go
@@ -0,0 +1,31 @@
+package statussubresource
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+ return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+ return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+ return newAnalyzer(), nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+ // This check only applies to CRDs so should not be on by default.
+ return false
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/utils/type_check.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/utils/type_check.go
new file mode 100644
index 0000000000..4cf7c2f30c
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/utils/type_check.go
@@ -0,0 +1,101 @@
+package utils
+
+import (
+ "fmt"
+ "go/ast"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+// TypeChecker is an interface for checking types in an AST.
+// It is used to check the underlying, built-in types of fields within structs, and raw type declarations.
+// It is up to the implementer to provide a function that will be called when a built-in type is found,
+// and to provide the analysis logic for that type.
+type TypeChecker interface {
+ CheckNode(pass *analysis.Pass, node ast.Node)
+}
+
+// NewTypeChecker returns a new TypeChecker with the provided checkFunc.
+func NewTypeChecker(checkFunc func(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string)) TypeChecker {
+ return &typeChecker{
+ checkFunc: checkFunc,
+ }
+}
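+
+// For example, a hypothetical analyzer could flag floating point fields:
+//
+//	checker := NewTypeChecker(func(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) {
+//		if ident.Name == "float32" || ident.Name == "float64" {
+//			pass.Reportf(node.Pos(), "%s should not use a float", prefix)
+//		}
+//	})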
+
+type typeChecker struct {
+ checkFunc func(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string)
+}
+
+// CheckNode checks the provided node for built-in types.
+// It will iterate through fields within structs, and raw type declarations,
+// calling the checkFunc when a built-in type is found.
+func (t *typeChecker) CheckNode(pass *analysis.Pass, node ast.Node) {
+ switch n := node.(type) {
+ case *ast.StructType:
+ if n.Fields == nil {
+ return
+ }
+
+ for _, field := range n.Fields.List {
+ t.checkField(pass, field)
+ }
+ case *ast.Field:
+ t.checkField(pass, n)
+ case *ast.TypeSpec:
+ t.checkTypeSpec(pass, n, node, "type")
+ }
+}
+
+func (t *typeChecker) checkField(pass *analysis.Pass, field *ast.Field) {
+ if field == nil || len(field.Names) == 0 || field.Names[0] == nil {
+ return
+ }
+
+ fieldName := field.Names[0].Name
+ prefix := fmt.Sprintf("field %s", fieldName)
+
+ t.checkTypeExpr(pass, field.Type, field, prefix)
+}
+
+func (t *typeChecker) checkTypeSpec(pass *analysis.Pass, tSpec *ast.TypeSpec, node ast.Node, prefix string) {
+ if tSpec.Name == nil {
+ return
+ }
+
+ typeName := tSpec.Name.Name
+ prefix = fmt.Sprintf("%s %s", prefix, typeName)
+
+ t.checkTypeExpr(pass, tSpec.Type, node, prefix)
+}
+
+func (t *typeChecker) checkTypeExpr(pass *analysis.Pass, typeExpr ast.Expr, node ast.Node, prefix string) {
+ switch typ := typeExpr.(type) {
+ case *ast.Ident:
+ t.checkIdent(pass, typ, node, prefix)
+ case *ast.StarExpr:
+ t.checkTypeExpr(pass, typ.X, node, fmt.Sprintf("%s pointer", prefix))
+ case *ast.ArrayType:
+ t.checkTypeExpr(pass, typ.Elt, node, fmt.Sprintf("%s array element", prefix))
+ case *ast.MapType:
+ t.checkTypeExpr(pass, typ.Key, node, fmt.Sprintf("%s map key", prefix))
+ t.checkTypeExpr(pass, typ.Value, node, fmt.Sprintf("%s map value", prefix))
+ }
+}
+
+// checkIdent calls the checkFunc with the ident, when we have hit a built-in type.
+// If the ident is not a built in, we look at the underlying type until we hit a built-in type.
+func (t *typeChecker) checkIdent(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) {
+ if ident.Obj == nil || ident.Obj.Decl == nil {
+ // We've hit a built-in type, no need to check further.
+ t.checkFunc(pass, ident, node, prefix)
+ return
+ }
+
+ tSpec, ok := ident.Obj.Decl.(*ast.TypeSpec)
+ if !ok {
+ return
+ }
+
+ // The field is using a type alias; check the underlying type of the alias.
+ t.checkTypeSpec(pass, tSpec, node, fmt.Sprintf("%s type", prefix))
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/config.go
new file mode 100644
index 0000000000..9e83bf4e1a
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/config.go
@@ -0,0 +1,12 @@
+package config
+
+// GolangCIConfig is the complete configuration for the KAL
+// linter when built as an integration into golangci-lint.
+type GolangCIConfig struct {
+ // Linters allows the user to configure which linters should,
+ // and should not be enabled.
+ Linters Linters `mapstructure:"linters"`
+
+ // LintersConfig contains configuration for individual linters.
+ LintersConfig LintersConfig `mapstructure:"lintersConfig"`
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters.go
new file mode 100644
index 0000000000..237f1ea298
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters.go
@@ -0,0 +1,25 @@
+package config
+
+const (
+ // Wildcard is used to imply all linters should be enabled/disabled.
+ Wildcard = "*"
+)
+
+// Linters allows the user to configure which linters should, and
+// should not be enabled.
+type Linters struct {
+ // Enable is used to enable specific linters.
+ // Use '*' to enable all known linters.
+ // When using '*', it should be the only value in the list.
+ // Values in this list will be added to the linters enabled by default.
+ // Values should not appear in both 'enable' and 'disable'.
+ Enable []string `mapstructure:"enable"`
+
+ // Disable is used to disable specific linters.
+ // Use '*' to disable all known linters. When all linters are disabled,
+ // only those explicitly called out in 'Enable' will be enabled.
+ // When using '*', it should be the only value in the list.
+ // Values in this list will be added to the linters disabled by default.
+ // Values should not appear in both 'enable' and 'disable'.
+ Disable []string `mapstructure:"disable"`
+}
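+
+// A hypothetical configuration snippet, adding one linter to the defaults and
+// disabling another:
+//
+//	linters:
+//	  enable:
+//	    - statussubresource
+//	  disable:
+//	    - nobools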
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters_config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters_config.go
new file mode 100644
index 0000000000..b0987c1ee9
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters_config.go
@@ -0,0 +1,101 @@
+package config
+
+// LintersConfig contains configuration for individual linters.
+type LintersConfig struct {
+ // conditions contains configuration for the conditions linter.
+ Conditions ConditionsConfig `json:"conditions"`
+
+ // jsonTags contains configuration for the jsontags linter.
+ JSONTags JSONTagsConfig `json:"jsonTags"`
+
+ // optionalOrRequired contains configuration for the optionalorrequired linter.
+ OptionalOrRequired OptionalOrRequiredConfig `json:"optionalOrRequired"`
+
+ // requiredFields contains configuration for the requiredfields linter.
+ RequiredFields RequiredFieldsConfig `json:"requiredFields"`
+}
+
+// ConditionsFirstField is the policy for the conditions linter.
+type ConditionsFirstField string
+
+const (
+ // ConditionsFirstFieldWarn indicates that the conditions should be the first field in the struct.
+ ConditionsFirstFieldWarn ConditionsFirstField = "Warn"
+
+ // ConditionsFirstFieldIgnore indicates that the conditions do not need to be the first field in the struct.
+ ConditionsFirstFieldIgnore ConditionsFirstField = "Ignore"
+)
+
+// ConditionsUseProtobuf is the policy for the conditions linter.
+type ConditionsUseProtobuf string
+
+const (
+ // ConditionsUseProtobufSuggestFix indicates that the linter will emit a warning if the conditions are not using protobuf tags and suggest a fix.
+ ConditionsUseProtobufSuggestFix ConditionsUseProtobuf = "SuggestFix"
+
+ // ConditionsUseProtobufWarn indicates that the linter will emit a warning if the conditions are not using protobuf tags.
+ ConditionsUseProtobufWarn ConditionsUseProtobuf = "Warn"
+
+ // ConditionsUseProtobufIgnore indicates that the linter will not emit a warning if the conditions are not using protobuf tags.
+ ConditionsUseProtobufIgnore ConditionsUseProtobuf = "Ignore"
+)
+
+// ConditionsConfig contains configuration for the conditions linter.
+type ConditionsConfig struct {
+ // isFirstField indicates whether the conditions should be the first field in the struct.
+ // Valid values are Warn and Ignore.
+ // When set to Warn, the linter will emit a warning if the conditions are not the first field in the struct.
+ // When set to Ignore, the linter will not emit a warning if the conditions are not the first field in the struct.
+ // When otherwise not specified, the default value is Warn.
+ IsFirstField ConditionsFirstField `json:"isFirstField"`
+
+ // useProtobuf indicates whether the linter should use protobuf tags.
+ // Valid values are SuggestFix, Warn and Ignore.
+ // When set to SuggestFix, the linter will emit a warning if the conditions are not using protobuf tags and suggest a fix.
+ // When set to Warn, the linter will emit a warning if the conditions are not using protobuf tags.
+ // When set to Ignore, the linter will not emit a warning if the conditions are not using protobuf tags.
+ // When otherwise not specified, the default value is SuggestFix.
+ UseProtobuf ConditionsUseProtobuf `json:"useProtobuf"`
+}
+
+// JSONTagsConfig contains configuration for the jsontags linter.
+type JSONTagsConfig struct {
+ // jsonTagRegex is the regular expression used to validate that json tags are in a particular format.
+ // By default, the regex used is "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$" and is used to check for
+ // camel case like string.
+ JSONTagRegex string `json:"jsonTagRegex"`
+}
+
+// OptionalOrRequiredConfig contains configuration for the optionalorrequired linter.
+type OptionalOrRequiredConfig struct {
+ // preferredOptionalMarker is the preferred marker to use for optional fields.
+ // If this field is not set, the default value is "optional".
+ // Valid values are "optional" and "kubebuilder:validation:Optional".
+ PreferredOptionalMarker string `json:"preferredOptionalMarker"`
+
+ // preferredRequiredMarker is the preferred marker to use for required fields.
+ // If this field is not set, the default value is "required".
+ // Valid values are "required" and "kubebuilder:validation:Required".
+ PreferredRequiredMarker string `json:"preferredRequiredMarker"`
+}
+
+// RequiredFieldPointerPolicy is the policy for pointers in required fields.
+type RequiredFieldPointerPolicy string
+
+const (
+ // RequiredFieldPointerWarn indicates that the linter will emit a warning if a required field is a pointer.
+ RequiredFieldPointerWarn RequiredFieldPointerPolicy = "Warn"
+
+ // RequiredFieldPointerSuggestFix indicates that the linter will emit a warning if a required field is a pointer and suggest a fix.
+ RequiredFieldPointerSuggestFix RequiredFieldPointerPolicy = "SuggestFix"
+)
+
+// RequiredFieldsConfig contains configuration for the requiredfields linter.
+type RequiredFieldsConfig struct {
+ // pointerPolicy is the policy for pointers in required fields.
+ // Valid values are "Warn" and "SuggestFix".
+ // When set to "Warn", the linter will emit a warning if a required field is a pointer.
+ // When set to "SuggestFix", the linter will emit a warning if a required field is a pointer and suggest a fix.
+ // When otherwise not specified, the default value is "SuggestFix".
+ PointerPolicy RequiredFieldPointerPolicy `json:"pointerPolicy"`
+}
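+
+// A hypothetical lintersConfig snippet exercising the options above:
+//
+//	lintersConfig:
+//	  conditions:
+//	    isFirstField: Warn
+//	    useProtobuf: Ignore
+//	  jsonTags:
+//	    jsonTagRegex: '^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$'
+//	  optionalOrRequired:
+//	    preferredOptionalMarker: optional
+//	  requiredFields:
+//	    pointerPolicy: Warn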
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/config.go
new file mode 100644
index 0000000000..37a8bcdf15
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/config.go
@@ -0,0 +1,21 @@
+package validation
+
+import (
+ "github.com/JoelSpeed/kal/pkg/config"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// ValidateGolangCIConfig is used to validate the provided configuration once
+// extracted from golangci-lint.
+func ValidateGolangCIConfig(g config.GolangCIConfig, fldPath *field.Path) error {
+ if fldPath == nil {
+ fldPath = field.NewPath("")
+ }
+
+ var fieldErrors field.ErrorList
+
+ fieldErrors = append(fieldErrors, ValidateLinters(g.Linters, fldPath.Child("linters"))...)
+ fieldErrors = append(fieldErrors, ValidateLintersConfig(g.LintersConfig, fldPath.Child("lintersConfig"))...)
+
+ return fieldErrors.ToAggregate()
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters.go
new file mode 100644
index 0000000000..e73fa2d95d
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters.go
@@ -0,0 +1,49 @@
+package validation
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/JoelSpeed/kal/pkg/analysis"
+ "github.com/JoelSpeed/kal/pkg/config"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// ValidateLinters is used to validate the configuration in the config.Linters struct.
+//
+//nolint:cyclop
+func ValidateLinters(l config.Linters, fldPath *field.Path) field.ErrorList {
+ fieldErrors := field.ErrorList{}
+
+ enable := sets.New(l.Enable...)
+ enablePath := fldPath.Child("enable")
+
+ switch {
+ case len(enable) != len(l.Enable):
+ fieldErrors = append(fieldErrors, field.Invalid(enablePath, l.Enable, "values in 'enable' must be unique"))
+ case enable.Has(config.Wildcard) && enable.Len() != 1:
+ fieldErrors = append(fieldErrors, field.Invalid(enablePath, l.Enable, "wildcard ('*') must not be specified with other values"))
+ case !enable.Has(config.Wildcard) && enable.Difference(analysis.NewRegistry().AllLinters()).Len() > 0:
+ fieldErrors = append(fieldErrors, field.Invalid(enablePath, l.Enable, fmt.Sprintf("unknown linters: %s", strings.Join(enable.Difference(analysis.NewRegistry().AllLinters()).UnsortedList(), ","))))
+ }
+
+ disable := sets.New(l.Disable...)
+ disablePath := fldPath.Child("disable")
+
+ switch {
+ case len(disable) != len(l.Disable):
+ fieldErrors = append(fieldErrors, field.Invalid(disablePath, l.Disable, "values in 'disable' must be unique"))
+ case disable.Has(config.Wildcard) && disable.Len() != 1:
+ fieldErrors = append(fieldErrors, field.Invalid(disablePath, l.Disable, "wildcard ('*') must not be specified with other values"))
+ case !disable.Has(config.Wildcard) && disable.Difference(analysis.NewRegistry().AllLinters()).Len() > 0:
+ fieldErrors = append(fieldErrors, field.Invalid(disablePath, l.Disable, fmt.Sprintf("unknown linters: %s", strings.Join(disable.Difference(analysis.NewRegistry().AllLinters()).UnsortedList(), ","))))
+ }
+
+ if enable.Intersection(disable).Len() > 0 {
+ fieldErrors = append(fieldErrors, field.Invalid(fldPath, l, fmt.Sprintf("values in 'enable' and 'disable' may not overlap, overlapping values: %s", strings.Join(enable.Intersection(disable).UnsortedList(), ","))))
+ }
+
+ return fieldErrors
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters_config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters_config.go
new file mode 100644
index 0000000000..591bb8e343
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters_config.go
@@ -0,0 +1,87 @@
+package validation
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired"
+ "github.com/JoelSpeed/kal/pkg/config"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// ValidateLintersConfig is used to validate the configuration in the config.LintersConfig struct.
+func ValidateLintersConfig(lc config.LintersConfig, fldPath *field.Path) field.ErrorList {
+ fieldErrors := field.ErrorList{}
+
+ fieldErrors = append(fieldErrors, validateConditionsConfig(lc.Conditions, fldPath.Child("conditions"))...)
+ fieldErrors = append(fieldErrors, validateJSONTagsConfig(lc.JSONTags, fldPath.Child("jsonTags"))...)
+ fieldErrors = append(fieldErrors, validateOptionalOrRequiredConfig(lc.OptionalOrRequired, fldPath.Child("optionalOrRequired"))...)
+ fieldErrors = append(fieldErrors, validateRequiredFieldsConfig(lc.RequiredFields, fldPath.Child("requiredFields"))...)
+
+ return fieldErrors
+}
+
+// validateConditionsConfig is used to validate the configuration in the config.ConditionsConfig struct.
+func validateConditionsConfig(cc config.ConditionsConfig, fldPath *field.Path) field.ErrorList {
+ fieldErrors := field.ErrorList{}
+
+ switch cc.IsFirstField {
+ case "", config.ConditionsFirstFieldWarn, config.ConditionsFirstFieldIgnore:
+ default:
+ fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("isFirstField"), cc.IsFirstField, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", config.ConditionsFirstFieldWarn, config.ConditionsFirstFieldIgnore)))
+ }
+
+ switch cc.UseProtobuf {
+ case "", config.ConditionsUseProtobufSuggestFix, config.ConditionsUseProtobufWarn, config.ConditionsUseProtobufIgnore:
+ default:
+ fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("useProtobuf"), cc.UseProtobuf, fmt.Sprintf("invalid value, must be one of %q, %q, %q or omitted", config.ConditionsUseProtobufSuggestFix, config.ConditionsUseProtobufWarn, config.ConditionsUseProtobufIgnore)))
+ }
+
+ return fieldErrors
+}
+
+// validateJSONTagsConfig is used to validate the configuration in the config.JSONTagsConfig struct.
+func validateJSONTagsConfig(jtc config.JSONTagsConfig, fldPath *field.Path) field.ErrorList {
+ fieldErrors := field.ErrorList{}
+
+ if jtc.JSONTagRegex != "" {
+ if _, err := regexp.Compile(jtc.JSONTagRegex); err != nil {
+ fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("jsonTagRegex"), jtc.JSONTagRegex, fmt.Sprintf("invalid regex: %v", err)))
+ }
+ }
+
+ return fieldErrors
+}
+
+// validateOptionalOrRequiredConfig is used to validate the configuration in the config.OptionalOrRequiredConfig struct.
+func validateOptionalOrRequiredConfig(oorc config.OptionalOrRequiredConfig, fldPath *field.Path) field.ErrorList {
+ fieldErrors := field.ErrorList{}
+
+ switch oorc.PreferredOptionalMarker {
+ case "", optionalorrequired.OptionalMarker, optionalorrequired.KubebuilderOptionalMarker:
+ default:
+ fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("preferredOptionalMarker"), oorc.PreferredOptionalMarker, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", optionalorrequired.OptionalMarker, optionalorrequired.KubebuilderOptionalMarker)))
+ }
+
+ switch oorc.PreferredRequiredMarker {
+ case "", optionalorrequired.RequiredMarker, optionalorrequired.KubebuilderRequiredMarker:
+ default:
+ fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("preferredRequiredMarker"), oorc.PreferredRequiredMarker, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", optionalorrequired.RequiredMarker, optionalorrequired.KubebuilderRequiredMarker)))
+ }
+
+ return fieldErrors
+}
+
+// validateRequiredFieldsConfig is used to validate the configuration in the config.RequiredFieldsConfig struct.
+func validateRequiredFieldsConfig(rfc config.RequiredFieldsConfig, fldPath *field.Path) field.ErrorList {
+ fieldErrors := field.ErrorList{}
+
+ switch rfc.PointerPolicy {
+ case "", config.RequiredFieldPointerWarn, config.RequiredFieldPointerSuggestFix:
+ default:
+ fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("pointerPolicy"), rfc.PointerPolicy, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", config.RequiredFieldPointerWarn, config.RequiredFieldPointerSuggestFix)))
+ }
+
+ return fieldErrors
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/plugin.go b/hack/tools/vendor/github.com/JoelSpeed/kal/plugin.go
new file mode 100644
index 0000000000..ef77aead5c
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/plugin.go
@@ -0,0 +1,55 @@
+package kal
+
+import (
+ "fmt"
+
+ kalanalysis "github.com/JoelSpeed/kal/pkg/analysis"
+ "github.com/JoelSpeed/kal/pkg/config"
+ "github.com/JoelSpeed/kal/pkg/validation"
+ "github.com/golangci/plugin-module-register/register"
+ "golang.org/x/tools/go/analysis"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+func init() {
+ register.Plugin("kal", New)
+}
+
+// New creates a new golangci-lint plugin based on the KAL analyzers.
+func New(settings any) (register.LinterPlugin, error) {
+ s, err := register.DecodeSettings[config.GolangCIConfig](settings)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding settings: %w", err)
+ }
+
+ return &GolangCIPlugin{config: s}, nil
+}
+
+// GolangCIPlugin constructs a new plugin for the golangci-lint
+// plugin pattern.
+// This allows golangci-lint to build a version of itself, containing
+// all of the analyzers included in KAL.
+type GolangCIPlugin struct {
+ config config.GolangCIConfig
+}
+
+// BuildAnalyzers returns all of the analyzers to run, based on the configuration.
+func (f *GolangCIPlugin) BuildAnalyzers() ([]*analysis.Analyzer, error) {
+ if err := validation.ValidateGolangCIConfig(f.config, field.NewPath("")); err != nil {
+ return nil, fmt.Errorf("error in KAL configuration: %w", err)
+ }
+
+ registry := kalanalysis.NewRegistry()
+
+ analyzers, err := registry.InitializeLinters(f.config.Linters, f.config.LintersConfig)
+ if err != nil {
+ return nil, fmt.Errorf("error initializing analyzers: %w", err)
+ }
+
+ return analyzers, nil
+}
+
+// GetLoadMode implements the golangci-lint plugin interface.
+func (f *GolangCIPlugin) GetLoadMode() string {
+ return register.LoadModeTypesInfo
+}
diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml
new file mode 100644
index 0000000000..758ae1a9e4
--- /dev/null
+++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml
@@ -0,0 +1,92 @@
+run:
+ tests: true
+
+output:
+ print-issued-lines: false
+
+linters:
+ enable-all: true
+ disable:
+ - cyclop
+ - depguard
+ - dupl
+ - dupword
+ - err113
+ - errorlint
+ - exhaustive
+ - exhaustruct
+ - exportloopref
+ - forcetypeassert
+ - funlen
+ - gci
+ - gochecknoglobals
+ - gocognit
+ - goconst
+ - gocyclo
+ - godot
+ - godox
+ - gofumpt
+ - govet
+ - ireturn
+ - lll
+ - maintidx
+ - mnd
+ - musttag
+ - nestif
+ - nilnil
+ - nlreturn
+ - nolintlint
+ - nonamedreturns
+ - paralleltest
+ - perfsprint
+ - predeclared
+ - revive
+ - stylecheck
+ - testableexamples
+ - testpackage
+ - thelper
+ - varnamelen
+ - wrapcheck
+ - wsl
+
+linters-settings:
+ govet:
+ enable:
+ - shadow
+ gocyclo:
+ min-complexity: 10
+ dupl:
+ threshold: 100
+ goconst:
+ min-len: 8
+ min-occurrences: 3
+ forbidigo:
+ exclude-godoc-examples: false
+ #forbid:
+ # - (Must)?NewLexer$
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+ exclude-use-default: false
+ exclude-dirs:
+ - _examples
+ exclude:
+ # Captured by errcheck.
+ - "^(G104|G204):"
+ # Very commonly not checked.
+ - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked'
+ - 'exported method (.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos) should have comment or be unexported'
+ - "composite literal uses unkeyed fields"
+ - 'declaration of "err" shadows declaration'
+ - "should not use dot imports"
+ - "Potential file inclusion via variable"
+ - "should have comment or be unexported"
+ - "comment on exported var .* should be of the form"
+ - "at least one file in a package should have a package comment"
+ - "string literal contains the Unicode"
+ - "methods on the same type should have the same receiver name"
+ - "_TokenType_name should be _TokenTypeName"
+ - "`_TokenType_map` should be `_TokenTypeMap`"
+ - "rewrite if-else to switch statement"
diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md
index 2ccec4e848..287aa68b7f 100644
--- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md
+++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md
@@ -92,6 +92,12 @@ passing checks, set the `-default-signifies-exhasutive=false` flag.
As a special case, if the type switch statement contains a `default` clause
that always panics, then exhaustiveness checks are still performed.
+By default, `go-check-sumtype` will not include shared interfaces in the exhaustiveness check.
+This can be changed by setting the `-include-shared-interfaces=true` flag.
+When this flag is set, `go-check-sumtype` will not require that all concrete structs
+are listed in the switch statement, as long as the switch statement is exhaustive
+with respect to interfaces the structs implement.
+
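+For example, with `-include-shared-interfaces=true`, a switch that lists only a
+shared interface is considered exhaustive (a hypothetical sketch; all names are
+illustrative):
+
+```go
+//sumtype:decl
+type Shape interface{ isShape() }
+
+// Sized is a shared interface implemented by every variant of Shape.
+type Sized interface {
+ Shape
+ Area() float64
+}
+
+type Circle struct{}
+
+func (Circle) isShape() {}
+func (Circle) Area() float64 { return 0 }
+
+type Square struct{}
+
+func (Square) isShape() {}
+func (Square) Area() float64 { return 0 }
+
+func describe(s Shape) {
+ switch s.(type) {
+ case Sized: // covers both Circle and Square via the shared interface
+ }
+}
+```
+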
## Details and motivation
Sum types are otherwise known as discriminated unions. That is, a sum type is
diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go
index 1a0a32517b..ff7fec728a 100644
--- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go
+++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go
@@ -29,7 +29,7 @@ func (e inexhaustiveError) Error() string {
// Names returns a sorted list of names corresponding to the missing variant
// cases.
func (e inexhaustiveError) Names() []string {
- var list []string
+ list := make([]string, 0, len(e.Missing))
for _, o := range e.Missing {
list = append(list, o.Name())
}
@@ -92,6 +92,10 @@ func missingVariantsInSwitch(
) (*sumTypeDef, []types.Object) {
asserted := findTypeAssertExpr(swtch)
ty := pkg.TypesInfo.TypeOf(asserted)
+ if ty == nil {
+ panic(fmt.Sprintf("no type found for asserted expression: %v", asserted))
+ }
+
def := findDef(defs, ty)
if def == nil {
// We couldn't find a corresponding sum type, so there's
@@ -103,11 +107,11 @@ func missingVariantsInSwitch(
// A catch-all case defeats all exhaustiveness checks.
return def, nil
}
- var variantTypes []types.Type
+ variantTypes := make([]types.Type, 0, len(variantExprs))
for _, expr := range variantExprs {
variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr))
}
- return def, def.missing(variantTypes)
+ return def, def.missing(variantTypes, config.IncludeSharedInterfaces)
}
// switchVariants returns all case expressions found in a type switch. This
diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go
index 759176eb7d..5c722b75c4 100644
--- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go
+++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go
@@ -2,4 +2,7 @@ package gochecksumtype
type Config struct {
DefaultSignifiesExhaustive bool
+ // IncludeSharedInterfaces in the exhaustiveness check. If true, we do not need to list all concrete structs, as long
+ // as the switch statement is exhaustive with respect to interfaces the structs implement.
+ IncludeSharedInterfaces bool
}
diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go
index 24729ac01b..71bdf2f72d 100644
--- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go
+++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go
@@ -71,7 +71,7 @@ type sumTypeDef struct {
// sum type declarations. If no such sum type definition could be found for
// any of the given declarations, then an error is returned.
func findSumTypeDefs(decls []sumTypeDecl) ([]sumTypeDef, []error) {
- var defs []sumTypeDef
+ defs := make([]sumTypeDef, 0, len(decls))
var errs []error
for _, decl := range decls {
def, err := newSumTypeDef(decl.Package.Types, decl)
@@ -104,7 +104,7 @@ func newSumTypeDef(pkg *types.Package, decl sumTypeDecl) (*sumTypeDef, error) {
return nil, notInterfaceError{decl}
}
hasUnexported := false
- for i := 0; i < iface.NumMethods(); i++ {
+ for i := range iface.NumMethods() {
if !iface.Method(i).Exported() {
hasUnexported = true
break
@@ -145,7 +145,7 @@ func (def *sumTypeDef) String() string {
// missing returns a list of variants in this sum type that are not in the
// given list of types.
-func (def *sumTypeDef) missing(tys []types.Type) []types.Object {
+func (def *sumTypeDef) missing(tys []types.Type, includeSharedInterfaces bool) []types.Object {
// TODO(ag): This is O(n^2). Fix that. /shrug
var missing []types.Object
for _, v := range def.Variants {
@@ -155,15 +155,29 @@ func (def *sumTypeDef) missing(tys []types.Type) []types.Object {
ty = indirect(ty)
if types.Identical(varty, ty) {
found = true
+ break
+ }
+ if includeSharedInterfaces && implements(varty, ty) {
+ found = true
+ break
}
}
- if !found {
+ if !found && !isInterface(varty) {
+ // We do not include interfaces extending the sum type, as all
+ // implementations of those interfaces are already covered by
+ // the sum type itself.
missing = append(missing, v)
}
}
return missing
}
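+
+// isInterface reports whether ty, after dereferencing pointers, has an
+// interface underlying type.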
+func isInterface(ty types.Type) bool {
+ underlying := indirect(ty).Underlying()
+ _, ok := underlying.(*types.Interface)
+ return ok
+}
+
// indirect dereferences through an arbitrary number of pointer types.
func indirect(ty types.Type) types.Type {
if ty, ok := ty.(*types.Pointer); ok {
@@ -171,3 +185,11 @@ func indirect(ty types.Type) types.Type {
}
return ty
}
+
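+// implements reports whether varty, or a pointer to varty, implements the
+// given interface type.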
+func implements(varty, interfaceType types.Type) bool {
+ underlying := interfaceType.Underlying()
+ if interf, ok := underlying.(*types.Interface); ok {
+ return types.Implements(varty, interf) || types.Implements(types.NewPointer(varty), interf)
+ }
+ return false
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/.gitignore b/hack/tools/vendor/github.com/alingse/nilnesserr/.gitignore
new file mode 100644
index 0000000000..6f72f89261
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/.gitignore
@@ -0,0 +1,25 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+go.work.sum
+
+# env file
+.env
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/.golangci.yaml b/hack/tools/vendor/github.com/alingse/nilnesserr/.golangci.yaml
new file mode 100644
index 0000000000..1a2a270a66
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/.golangci.yaml
@@ -0,0 +1,66 @@
+linters:
+ enable-all: true
+ disable:
+ - wsl
+ - varnamelen
+ - nilnil
+ - ireturn
+ - gochecknoglobals
+ - nolintlint
+
+linters-settings:
+ depguard:
+ rules:
+ main:
+ list-mode: lax
+ files:
+ - $all
+ allow:
+ - $gostd
+ - github.com/alingse/nilnesserr
+
+issues:
+ exclude-rules:
+ - path: internal/typeparams
+ linters:
+ - nonamedreturns
+ - nlreturn
+ - intrange
+ - mnd
+ - forcetypeassert
+ - exhaustruct
+ - exhaustive
+ - err113
+ - gofumpt
+ - prealloc
+ - gocritic
+ - funlen
+ - cyclop
+ - gocognit
+
+ - path: nilness.go
+ linters:
+ - nonamedreturns
+ - nlreturn
+ - nilnil
+ - mnd
+ - forcetypeassert
+ - gochecknoglobals
+ - nestif
+ - funlen
+ - godox
+ - gocognit
+ - gofumpt
+ - exhaustive
+ - cyclop
+ - unparam
+ - gocyclo
+
+ - text: "analysis."
+ linters:
+ - exhaustruct
+
+ - text: "newAnalyzer"
+ linters:
+ - unparam
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/LICENSE b/hack/tools/vendor/github.com/alingse/nilnesserr/LICENSE
new file mode 100644
index 0000000000..6caf1ea1c6
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 alingse
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/README.md b/hack/tools/vendor/github.com/alingse/nilnesserr/README.md
new file mode 100644
index 0000000000..e01bb40244
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/README.md
@@ -0,0 +1,74 @@
+# nilnesserr
+
+nilnesserr = nilness + nilerr
+
+`nilnesserr` is a linter that reports returning a nil error value in Go. It combines the features of [nilness](https://cs.opensource.google/go/x/tools/+/refs/tags/v0.28.0:go/analysis/passes/nilness/nilness.go) and [nilerr](https://github.com/gostaticanalysis/nilerr), providing a concise way to detect returning an unrelated or nil-valued error.
+
+## Case
+
+case 1
+```go
+err := do()
+if err != nil {
+ return err
+}
+err2 := do2()
+if err2 != nil {
+ return err // should return err2 after the `err2 != nil` check, but returns a nil-valued error
+}
+```
+
+
+## Some Real Bugs
+
+- https://github.com/alingse/sundrylint/issues/4
+- https://github.com/alingse/nilnesserr/issues/1
+
+We use https://github.com/alingse/go-linter-runner to run the linter on GitHub Actions against public Go repos.
+
+## Install
+
+```bash
+go install github.com/alingse/nilnesserr/cmd/nilnesserr@latest
+```
+
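+Assuming the standard `go/analysis` single-checker entrypoint, the installed
+binary can then be run against a package tree:
+
+```bash
+nilnesserr ./...
+```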
+
+## TODO
+
+case 2
+
+```go
+err := do()
+if err != nil {
+ return err
+}
+_, ok := do2()
+if !ok {
+ return err
+}
+
+```
+
+case 3
+
+```go
+err := do()
+if err != nil {
+ return err
+}
+_, ok := do2()
+if !ok {
+ return errors.Wrap(err)
+}
+```
+
+Maybe this is also a bug: the code should return a non-nil error value after the `if`.
+
+## License
+
+This project is licensed under the MIT License. See the LICENSE file for details.
+
+This project incorporates source code from two different libraries:
+
+1. [nilness](https://cs.opensource.google/go/x/tools/+/refs/tags/v0.28.0:go/analysis/passes/nilness/nilness.go) licensed under the BSD license.
+2. [nilerr](https://github.com/gostaticanalysis/nilerr) licensed under the MIT license.
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go
new file mode 100644
index 0000000000..7a744d123b
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types, the core type is the underlying type.
+ }
+
+ terms, err := NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) == 0 => the interface has an empty type set.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(typ types.Type) ([]*types.Term, error) {
+ switch typ := typ.Underlying().(type) {
+ case *types.TypeParam:
+ return StructuralTerms(typ)
+ case *types.Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*types.Term{types.NewTerm(false, typ)}, nil
+ }
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go
new file mode 100644
index 0000000000..0302872f47
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go
@@ -0,0 +1,200 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+)
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+// type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*types.Term
+ for _, term := range tset.terms {
+ terms = append(terms, types.NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+var ErrNilType = errors.New("nil type")
+var ErrUnreachable = errors.New("unreachable")
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ return nil, ErrNilType
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *types.Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *types.TypeParam, *types.Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *types.TypeParam:
+ return nil, ErrUnreachable
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go
new file mode 100644
index 0000000000..cbd12f8013
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "bytes"
+ "go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" | ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go
new file mode 100644
index 0000000000..35c66003d6
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go
@@ -0,0 +1,166 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/linter.go b/hack/tools/vendor/github.com/alingse/nilnesserr/linter.go
new file mode 100644
index 0000000000..b622fa694f
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/linter.go
@@ -0,0 +1,48 @@
+package nilnesserr
+
+import (
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+)
+
+const (
+ linterName = "nilnesserr"
+ linterDoc = `This linter reports code that checks for err != nil but then returns a different, nil-valued error.
+powered by nilness and nilerr.`
+
+ linterMessage = "returned a nil error value after an error check"
+)
+
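+// LinterSetting holds the configuration for the linter. It is currently empty.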
+type LinterSetting struct{}
+
+func NewAnalyzer(setting LinterSetting) (*analysis.Analyzer, error) {
+ a, err := newAnalyzer(setting)
+ if err != nil {
+ return nil, err
+ }
+
+ return &analysis.Analyzer{
+ Name: linterName,
+ Doc: linterDoc,
+ Run: a.run,
+ Requires: []*analysis.Analyzer{
+ buildssa.Analyzer,
+ },
+ }, nil
+}
+
+type analyzer struct {
+ setting LinterSetting
+}
+
+func newAnalyzer(setting LinterSetting) (*analyzer, error) {
+ a := &analyzer{setting: setting}
+
+ return a, nil
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+ _, _ = a.checkNilnesserr(pass)
+
+ return nil, nil
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/nilerr.go b/hack/tools/vendor/github.com/alingse/nilnesserr/nilerr.go
new file mode 100644
index 0000000000..c05ec90031
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/nilerr.go
@@ -0,0 +1,83 @@
+// Some code was copied from https://github.com/gostaticanalysis/nilerr/blob/master/nilerr.go
+
+package nilnesserr
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ssa"
+)
+
+var errType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) // nolint: forcetypeassert
+
+func isErrType(res ssa.Value) bool {
+ return types.Implements(res.Type(), errType)
+}
+
+func isConstNil(res ssa.Value) bool {
+ v, ok := res.(*ssa.Const)
+ if ok && v.IsNil() {
+ return true
+ }
+
+ return false
+}
+
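+// extractCheckedErrorValue returns the error operand of a comparison against a
+// nil constant, or nil if binOp is not such a comparison.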
+func extractCheckedErrorValue(binOp *ssa.BinOp) ssa.Value {
+ if isErrType(binOp.X) && isConstNil(binOp.Y) {
+ return binOp.X
+ }
+ if isErrType(binOp.Y) && isConstNil(binOp.X) {
+ return binOp.Y
+ }
+
+ return nil
+}
+
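+// errFact records the known nilness of an error value on the current
+// dominance path.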
+type errFact fact
+
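+// findLastNonnilValue walks the recorded error facts backwards and returns the
+// most recent error value known to be non-nil, stopping (and returning nil) if
+// it reaches res itself first.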
+func findLastNonnilValue(errors []errFact, res ssa.Value) ssa.Value {
+ if len(errors) == 0 {
+ return nil
+ }
+
+ for j := len(errors) - 1; j >= 0; j-- {
+ last := errors[j]
+ if last.value == res {
+ return nil
+ } else if last.nilness == isnonnil {
+ return last.value
+ }
+ }
+
+ return nil
+}
+
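+// checkNilnesserr reports return statements in block b that return an error
+// value known to be nil even though an earlier check proved a different error
+// value to be non-nil.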
+func checkNilnesserr(pass *analysis.Pass, b *ssa.BasicBlock, errors []errFact, isNilness func(value ssa.Value) bool) {
+ for i := range b.Instrs {
+ instr, ok := b.Instrs[i].(*ssa.Return)
+ if !ok {
+ continue
+ }
+
+ for _, res := range instr.Results {
+ if !isErrType(res) || isConstNil(res) || !isNilness(res) {
+ continue
+ }
+ // find the most recent error value proven non-nil before this return
+ lastValue := findLastNonnilValue(errors, res)
+ if lastValue == nil {
+ continue
+ }
+ // report
+ pos := instr.Pos()
+ if pos.IsValid() {
+ pass.Report(analysis.Diagnostic{
+ Pos: pos,
+ Message: linterMessage,
+ })
+ }
+ }
+ }
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/nilness.go b/hack/tools/vendor/github.com/alingse/nilnesserr/nilness.go
new file mode 100644
index 0000000000..6c99041e90
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/nilness.go
@@ -0,0 +1,374 @@
+// This file was copied from https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/nilness/nilness.go
+// and modified to also check the returned error.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nilnesserr
+
+import (
+ "go/token"
+ "go/types"
+
+ "github.com/alingse/nilnesserr/internal/typeparams"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+)
+
+func (a *analyzer) checkNilnesserr(pass *analysis.Pass) (interface{}, error) {
+ ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+ for _, fn := range ssainput.SrcFuncs {
+ runFunc(pass, fn)
+ }
+ return nil, nil
+}
+
+func runFunc(pass *analysis.Pass, fn *ssa.Function) {
+ // visit visits reachable blocks of the CFG in dominance order,
+ // maintaining a stack of dominating nilness facts.
+ //
+ // By traversing the dom tree, we can pop facts off the stack as
+ // soon as we've visited a subtree. Had we traversed the CFG,
+ // we would need to retain the set of facts for each block.
+ seen := make([]bool, len(fn.Blocks)) // seen[i] means visit should ignore block i
+
+ var visit func(b *ssa.BasicBlock, stack []fact, errors []errFact)
+
+ visit = func(b *ssa.BasicBlock, stack []fact, errors []errFact) {
+ if seen[b.Index] {
+ return
+ }
+ seen[b.Index] = true
+
+ // check whether this block returns a nil-valued error
+ checkNilnesserr(
+ pass, b,
+ errors,
+ func(v ssa.Value) bool {
+ return nilnessOf(stack, v) == isnil
+ })
+
+ // For nil comparison blocks, report an error if the condition
+ // is degenerate, and push a nilness fact on the stack when
+ // visiting its true and false successor blocks.
+ if binop, tsucc, fsucc := eq(b); binop != nil {
+ // extract the err != nil or err == nil
+ errValue := extractCheckedErrorValue(binop)
+
+ xnil := nilnessOf(stack, binop.X)
+ ynil := nilnessOf(stack, binop.Y)
+
+ if ynil != unknown && xnil != unknown && (xnil == isnil || ynil == isnil) {
+ // Degenerate condition:
+ // the nilness of both operands is known,
+ // and at least one of them is nil.
+
+ // If tsucc's or fsucc's sole incoming edge is impossible,
+ // it is unreachable. Prune traversal of it and
+ // all the blocks it dominates.
+ // (We could be more precise with full dataflow
+ // analysis of control-flow joins.)
+ var skip *ssa.BasicBlock
+ if xnil == ynil {
+ skip = fsucc
+ } else {
+ skip = tsucc
+ }
+ for _, d := range b.Dominees() {
+ if d == skip && len(d.Preds) == 1 {
+ continue
+ }
+
+ visit(d, stack, errors)
+ }
+
+ return
+ }
+
+ // "if x == nil" or "if nil == y" condition; x, y are unknown.
+ if xnil == isnil || ynil == isnil {
+ var newFacts facts
+ if xnil == isnil {
+ // x is nil, y is unknown:
+ // t successor learns y is nil.
+ newFacts = expandFacts(fact{binop.Y, isnil})
+ } else {
+ // y is nil, x is unknown:
+ // t successor learns x is nil.
+ newFacts = expandFacts(fact{binop.X, isnil})
+ }
+
+ for _, d := range b.Dominees() {
+ // Successor blocks learn a fact
+ // only at non-critical edges.
+ // (We could be more precise with full dataflow
+ // analysis of control-flow joins.)
+ s := stack
+ errs := errors
+ if len(d.Preds) == 1 {
+ if d == tsucc {
+ s = append(s, newFacts...)
+ // add nil error
+ if errValue != nil {
+ errs = append(errs, errFact{value: errValue, nilness: isnil})
+ }
+ } else if d == fsucc {
+ s = append(s, newFacts.negate()...)
+ // add non-nil error
+ if errValue != nil {
+ errs = append(errs, errFact{value: errValue, nilness: isnonnil})
+ }
+ }
+ }
+
+ visit(d, s, errs)
+ }
+ return
+ }
+ }
+
+ // In code of the form:
+ //
+ // if ptr, ok := x.(*T); ok { ... } else { fsucc }
+ //
+ // the fsucc block learns that ptr == nil,
+ // since that's its zero value.
+ if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok {
+ // Handle "if ok" and "if !ok" variants.
+ cond, fsucc := If.Cond, b.Succs[1]
+ if unop, ok := cond.(*ssa.UnOp); ok && unop.Op == token.NOT {
+ cond, fsucc = unop.X, b.Succs[0]
+ }
+
+ // Match pattern:
+ // t0 = typeassert (pointerlike)
+ // t1 = extract t0 #0 // ptr
+ // t2 = extract t0 #1 // ok
+ // if t2 goto tsucc, fsucc
+ if extract1, ok := cond.(*ssa.Extract); ok && extract1.Index == 1 {
+ if assert, ok := extract1.Tuple.(*ssa.TypeAssert); ok &&
+ isNillable(assert.AssertedType) {
+ for _, pinstr := range *assert.Referrers() {
+ if extract0, ok := pinstr.(*ssa.Extract); ok &&
+ extract0.Index == 0 &&
+ extract0.Tuple == extract1.Tuple {
+ for _, d := range b.Dominees() {
+ if len(d.Preds) == 1 && d == fsucc {
+ visit(d, append(stack, fact{extract0, isnil}), errors)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ for _, d := range b.Dominees() {
+ visit(d, stack, errors)
+ }
+ }
+
+ // Visit the entry block. No need to visit fn.Recover.
+ if fn.Blocks != nil {
+ visit(fn.Blocks[0], make([]fact, 0, 20), nil) // 20 is plenty
+ }
+}
+
+// A fact records that a block is dominated
+// by the condition v == nil or v != nil.
+type fact struct {
+ value ssa.Value
+ nilness nilness
+}
+
+func (f fact) negate() fact { return fact{f.value, -f.nilness} }
+
+type nilness int
+
+const (
+ isnonnil = -1
+ unknown nilness = 0
+ isnil = 1
+)
+
+var nilnessStrings = []string{"non-nil", "unknown", "nil"}
+
+func (n nilness) String() string { return nilnessStrings[n+1] }
+
+// nilnessOf reports whether v is definitely nil, definitely not nil,
+// or unknown given the dominating stack of facts.
+func nilnessOf(stack []fact, v ssa.Value) nilness {
+ switch v := v.(type) {
+ // unwrap ChangeInterface and Slice values recursively, to detect if underlying
+ // values have any facts recorded or are otherwise known with regard to nilness.
+ //
+ // This work must be in addition to expanding facts about
+ // ChangeInterfaces during inference/fact gathering because this covers
+ // cases where the nilness of a value is intrinsic, rather than based
+ // on inferred facts, such as a zero value interface variable. That
+ // said, this work alone would only inform us when facts are about
+ // underlying values, rather than outer values, when the analysis is
+ // transitive in both directions.
+ case *ssa.ChangeInterface:
+ if underlying := nilnessOf(stack, v.X); underlying != unknown {
+ return underlying
+ }
+ case *ssa.MakeInterface:
+ // A MakeInterface is non-nil unless its operand is a type parameter.
+ tparam, ok := types.Unalias(v.X.Type()).(*types.TypeParam)
+ if !ok {
+ return isnonnil
+ }
+
+ // A MakeInterface of a type parameter is non-nil if
+ // the type parameter cannot be instantiated as an
+ // interface type (#66835).
+ if terms, err := typeparams.NormalTerms(tparam.Constraint()); err == nil && len(terms) > 0 {
+ return isnonnil
+ }
+
+ // If the type parameter can be instantiated as an
+ // interface (and thus also as a concrete type),
+ // we can't determine the nilness.
+
+ case *ssa.Slice:
+ if underlying := nilnessOf(stack, v.X); underlying != unknown {
+ return underlying
+ }
+ case *ssa.SliceToArrayPointer:
+ nn := nilnessOf(stack, v.X)
+ if slice2ArrayPtrLen(v) > 0 {
+ if nn == isnil {
+ // We know that *(*[1]byte)(nil) is going to panic because of the
+ // conversion. So return unknown to the caller, preventing useless
+ // nil dereference reporting due to the * operator.
+ return unknown
+ }
+ // Otherwise, the conversion will yield a non-nil pointer to array.
+ // Note that the instruction can still panic if the array length is
+ // greater than the slice length. If the value is used by another instruction,
+ // that instruction can assume the panic did not happen when that
+ // instruction is reached.
+ return isnonnil
+ }
+ // In case array length is zero, the conversion result depends on nilness of the slice.
+ if nn != unknown {
+ return nn
+ }
+ }
+
+ // Is value intrinsically nil or non-nil?
+ switch v := v.(type) {
+ case *ssa.Alloc,
+ *ssa.FieldAddr,
+ *ssa.FreeVar,
+ *ssa.Function,
+ *ssa.Global,
+ *ssa.IndexAddr,
+ *ssa.MakeChan,
+ *ssa.MakeClosure,
+ *ssa.MakeMap,
+ *ssa.MakeSlice:
+ return isnonnil
+
+ case *ssa.Const:
+ if v.IsNil() {
+ return isnil // nil or zero value of a pointer-like type
+ } else {
+ return unknown // non-pointer
+ }
+ }
+
+ // Search dominating control-flow facts.
+ for _, f := range stack {
+ if f.value == v {
+ return f.nilness
+ }
+ }
+ return unknown
+}
+
+func slice2ArrayPtrLen(v *ssa.SliceToArrayPointer) int64 {
+ return v.Type().(*types.Pointer).Elem().Underlying().(*types.Array).Len()
+}
+
+// If b ends with an equality comparison, eq returns the operation and
+// its true (equal) and false (not equal) successors.
+func eq(b *ssa.BasicBlock) (op *ssa.BinOp, tsucc, fsucc *ssa.BasicBlock) {
+ if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok {
+ if binop, ok := If.Cond.(*ssa.BinOp); ok {
+ switch binop.Op {
+ case token.EQL:
+ return binop, b.Succs[0], b.Succs[1]
+ case token.NEQ:
+ return binop, b.Succs[1], b.Succs[0]
+ }
+ }
+ }
+ return nil, nil, nil
+}
+
+// expandFacts takes a single fact and returns the set of facts that can be
+// known about it or any of its related values. Some operations, like
+// ChangeInterface, have transitive nilness, such that if you know the
+// underlying value is nil, you also know the value itself is nil, and vice
+// versa. This operation allows callers to match on any of the related values
+// in analyses, rather than just the one form of the value that happened to
+// appear in a comparison.
+//
+// This work must be in addition to unwrapping values within nilnessOf because
+// while this work helps give facts about transitively known values based on
+// inferred facts, the recursive check within nilnessOf covers cases where
+// nilness facts are intrinsic to the underlying value, such as a zero-value
+// interface variable.
+//
+// ChangeInterface is the only expansion currently supported, but others, like
+// Slice, could be added. At this time, this tool does not check slice
+// operations in a way this expansion could help. See
+// https://play.golang.org/p/mGqXEp7w4fR for an example.
+func expandFacts(f fact) []fact {
+ ff := []fact{f}
+
+Loop:
+ for {
+ switch v := f.value.(type) {
+ case *ssa.ChangeInterface:
+ f = fact{v.X, f.nilness}
+ ff = append(ff, f)
+ default:
+ break Loop
+ }
+ }
+
+ return ff
+}
+
+type facts []fact
+
+func (ff facts) negate() facts {
+ nn := make([]fact, len(ff))
+ for i, f := range ff {
+ nn[i] = f.negate()
+ }
+ return nn
+}
+
+func isNillable(t types.Type) bool {
+ // TODO(adonovan): CoreType (+ case *Interface) looks wrong.
+ // This should probably use Underlying, and handle TypeParam
+ // by computing the union across its normal terms.
+ switch t := typeparams.CoreType(t).(type) {
+ case *types.Pointer,
+ *types.Map,
+ *types.Signature,
+ *types.Chan,
+ *types.Interface,
+ *types.Slice:
+ return true
+ case *types.Basic:
+ return t == types.Typ[types.UnsafePointer]
+ }
+ return false
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/.gitignore
new file mode 100644
index 0000000000..38ea34ff51
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/.gitignore
@@ -0,0 +1,18 @@
+### Go template
+
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+
+# Go workspace file
+go.work
+
+# No Goland stuff in this repo
+.idea
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/LICENSE
new file mode 100644
index 0000000000..a22292eb5a
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+3. Neither name of copyright holders nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/README.md b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/README.md
new file mode 100644
index 0000000000..03e5b83eb1
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/README.md
@@ -0,0 +1,54 @@
+# ANTLR4 Go Runtime Module Repo
+
+IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
+
+ - Do not submit PRs or any change requests to this repo
+ - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
+ - This repo contains the Go runtime that your generated projects should import
+
+## Introduction
+
+This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
+at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
+the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.
+
+The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
+
+### Why?
+
+The `go get` command is unable to retrieve the Go runtime when it is embedded so
+deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
+does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly, and
+causes confusion.
+
+For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as:
+
+```sh
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
+)
+```
+
+Where you would expect to see:
+
+```sh
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
+)
+```
+
+The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and
+from whence users can expect `go get` to behave as expected.
+
+
+# Documentation
+Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
+migrating existing projects to use the new module location and for information on how to use the Go runtime in
+general.
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
new file mode 100644
index 0000000000..3bb4fd7c4e
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
@@ -0,0 +1,102 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Go Runtime
+
+At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
+source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
+ANTLR4 that it is compatible with (i.e. it uses the /v4 path).
+
+However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
+of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
+This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
+list the release tag such as @4.12.0 - this was confusing, to say the least.
+
+As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
+(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
+which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
+
+This means that if you are using the source code without modules, you should also use the source code in the [new repo].
+Though we highly recommend that you use go modules, as they are now idiomatic for Go.
+
+I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
+
+Go runtime author: [Jim Idle] jimi@idle.ws
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the Go target, it is generally recommended to place the source grammar files in a package of
+their own and to generate the code with a `.sh` script driven by a go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. This does mean
+that the antlr tool JAR file will be checked in to your source code control, though you are, of course, free to use any other
+way of specifying the version of the ANTLR tool to use, such as an alias in `.zshrc` or equivalent, a profile in
+your IDE, or configuration in your CI system. Checking in the jar does make it easy to reproduce the build as
+it was at any point in its history.
+
+Here is a general/recommended template for an ANTLR based recognizer in Go:
+
+ .
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.1-complete.jar
+ │ ├── generate.go
+ │ └── generate.sh
+ ├── parsing - generated code goes here
+ │ └── error_listeners.go
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
+
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+    alias antlr4='java -Xmx500M -cp "./antlr-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
+is to generate the code into a directory or package of your own choosing.
+
+From the command line at the root of your source package (the location of go.mod) you can then simply issue the command:
+
+ go generate ./...
+
+This will generate the code for the parser and place it in the parsing package. You can then use the generated code
+by importing the parsing package.
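+
+As an illustration, a parse might then be driven as follows. The lexer and parser constructor names below are
+hypothetical - the actual generated names depend on your grammar name:
+
+	import (
+		"github.com/antlr4-go/antlr/v4"
+
+		"mymodule/parsing" // hypothetical module path
+	)
+
+	func parse(input string) {
+		is := antlr.NewInputStream(input)
+		lexer := parsing.NewMygrammarLexer(is) // hypothetical generated name
+		stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
+		parser := parsing.NewMygrammarParser(stream) // hypothetical generated name
+		_ = parser // invoke your start rule here, e.g. parser.Mygrammar()
+	}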
+
+There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
+
+# Copyright Notice
+
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
+[new repo]: https://github.com/antlr4-go/antlr
+[Jim Idle]: https://github.com/jimidle
+[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
+*/
+package antlr
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go
new file mode 100644
index 0000000000..cdeefed247
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "sync"
+
+// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
+// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
+var ATNInvalidAltNumber int
+
+// ATN represents an “[Augmented Transition Network]”, though in ANTLR the term
+// “Augmented Recursive Transition Network” is generally used; there are also some descriptions of a “[Recursive Transition Network]”
+// in existence.
+//
+// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
+//
+// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
+// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
+// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
+type ATN struct {
+
+	// DecisionToState is the decision points for all rules, sub-rules, optional
+	// blocks, ()+, ()*, etc. Each rule and sub-rule is a decision point, and we must track them so we
+	// can go back later and build DFA predictors for them.
+ DecisionToState []DecisionState
+
+ // grammarType is the ATN type and is used for deserializing ATNs from strings.
+ grammarType int
+
+ // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+ lexerActions []LexerAction
+
+ // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+ maxTokenType int
+
+ modeNameToStartState map[string]*TokensStartState
+
+ modeToStartState []*TokensStartState
+
+ // ruleToStartState maps from rule index to starting state number.
+ ruleToStartState []*RuleStartState
+
+ // ruleToStopState maps from rule index to stop state number.
+ ruleToStopState []*RuleStopState
+
+ // ruleToTokenType maps the rule index to the resulting token type for lexer
+ // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+ // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+ // specified, and otherwise is nil.
+ ruleToTokenType []int
+
+	// states is a list of all states in the ATN, ordered by state number.
+	states []ATNState
+
+ mu sync.Mutex
+ stateMu sync.RWMutex
+ edgeMu sync.RWMutex
+}
+
+// NewATN returns a new ATN struct representing the given grammarType and is used
+// for runtime deserialization of ATNs from the code generated by the ANTLR tool
+func NewATN(grammarType int, maxTokenType int) *ATN {
+ return &ATN{
+ grammarType: grammarType,
+ maxTokenType: maxTokenType,
+ modeNameToStartState: make(map[string]*TokensStartState),
+ }
+}
+
+// NextTokensInContext computes and returns the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+ return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
+// in state s and staying in the same rule. [TokenEpsilon] is in the set if we reach the end of
+// the rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ iset := s.GetNextTokenWithinRule()
+ if iset == nil {
+ iset = a.NextTokensInContext(s, nil)
+ iset.readOnly = true
+ s.SetNextTokenWithinRule(iset)
+ }
+ return iset
+}
+
+// NextTokens computes and returns the set of valid tokens starting in state s, by
+// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
+ if ctx == nil {
+ return a.NextTokensNoContext(s)
+ }
+
+ return a.NextTokensInContext(s, ctx)
+}
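+
+// As a usage sketch (the atn and state values here are assumed to come from a
+// deserialized ATN), the lookahead set for a state can be inspected like so:
+//
+//	following := atn.NextTokens(state, nil) // nil ctx restricts the set to the current rule
+//	if following.contains(TokenEOF) {
+//		// the end of the rule is reachable without consuming more input
+//	}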
+
+func (a *ATN) addState(state ATNState) {
+ if state != nil {
+ state.SetATN(a)
+ state.SetStateNumber(len(a.states))
+ }
+
+ a.states = append(a.states, state)
+}
+
+func (a *ATN) removeState(state ATNState) {
+ a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
+}
+
+func (a *ATN) defineDecisionState(s DecisionState) int {
+ a.DecisionToState = append(a.DecisionToState, s)
+ s.setDecision(len(a.DecisionToState) - 1)
+
+ return s.getDecision()
+}
+
+func (a *ATN) getDecisionState(decision int) DecisionState {
+ if len(a.DecisionToState) == 0 {
+ return nil
+ }
+
+ return a.DecisionToState[decision]
+}
+
+// getExpectedTokens computes the set of input symbols which could follow ATN
+// state number stateNumber in the specified full parse context ctx and returns
+// the set of potentially valid input symbols which could follow the specified
+// state in the specified context. This method considers the complete parser
+// context, but does not evaluate semantic predicates (i.e. all predicates
+// encountered during the calculation are assumed true). If a path in the ATN
+// exists from the starting state to the RuleStopState of the outermost context
+// without Matching any symbols, Token.EOF is added to the returned set.
+//
+// A nil ctx defaults to ParserRuleContext.EMPTY.
+//
+// It panics if the ATN does not contain state stateNumber.
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
+ if stateNumber < 0 || stateNumber >= len(a.states) {
+ panic("Invalid state number.")
+ }
+
+ s := a.states[stateNumber]
+ following := a.NextTokens(s, nil)
+
+ if !following.contains(TokenEpsilon) {
+ return following
+ }
+
+ expected := NewIntervalSet()
+
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := a.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+
+ following = a.NextTokens(rt.(*RuleTransition).followState, nil)
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+ ctx = ctx.GetParent().(RuleContext)
+ }
+
+ if following.contains(TokenEpsilon) {
+ expected.addOne(TokenEOF)
+ }
+
+ return expected
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
new file mode 100644
index 0000000000..a83f25d349
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
@@ -0,0 +1,335 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+const (
+ lexerConfig = iota // Indicates that this ATNConfig is for a lexer
+ parserConfig // Indicates that this ATNConfig is for a parser
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic context, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive in the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context *PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+ cType int // lexerConfig or parserConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
+func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ return NewATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
+func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ pac := &ATNConfig{}
+ pac.state = state
+ pac.alt = alt
+ pac.context = context
+ pac.semanticContext = semanticContext
+ pac.cType = parserConfig
+ return pac
+}
+
+// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
+func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
+func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+// NewATNConfig2 creates a new ATNConfig instance given an existing config and a semantic context only
+func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
+func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ return NewATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context; the other
+// 'constructors' are just wrappers around this one.
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
+ }
+ b := &ATNConfig{}
+ b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
+ b.cType = parserConfig
+ return b
+}
+
+func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
+
+ a.state = state
+ a.alt = alt
+ a.context = context
+ a.semanticContext = semanticContext
+ a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
+ a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
+}
+
+func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
+ return a.precedenceFilterSuppressed
+}
+
+func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ a.precedenceFilterSuppressed = v
+}
+
+// GetState returns the ATN state associated with this configuration
+func (a *ATNConfig) GetState() ATNState {
+ return a.state
+}
+
+// GetAlt returns the alternative associated with this configuration
+func (a *ATNConfig) GetAlt() int {
+ return a.alt
+}
+
+// SetContext sets the rule invocation stack associated with this configuration
+func (a *ATNConfig) SetContext(v *PredictionContext) {
+ a.context = v
+}
+
+// GetContext returns the rule invocation stack associated with this configuration
+func (a *ATNConfig) GetContext() *PredictionContext {
+ return a.context
+}
+
+// GetSemanticContext returns the semantic context associated with this configuration
+func (a *ATNConfig) GetSemanticContext() SemanticContext {
+ return a.semanticContext
+}
+
+// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
+func (a *ATNConfig) GetReachesIntoOuterContext() int {
+ return a.reachesIntoOuterContext
+}
+
+// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
+func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
+ a.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
+ switch a.cType {
+ case lexerConfig:
+ return a.LEquals(o)
+ case parserConfig:
+ return a.PEquals(o)
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
+ var other, ok = o.(*ATNConfig)
+
+ if !ok {
+ return false
+ }
+ if a == other {
+ return true
+ } else if other == nil {
+ return false
+ }
+
+ var equal bool
+
+ if a.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = a.context.Equals(other.context)
+ }
+
+ var (
+ nums = a.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = a.alt == other.alt
+ cons = a.semanticContext.Equals(other.semanticContext)
+ sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) Hash() int {
+ switch a.cType {
+ case lexerConfig:
+ return a.LHash()
+ case parserConfig:
+ return a.PHash()
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) PHash() int {
+ var c int
+ if a.context != nil {
+ c = a.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+// String returns a string representation of the ATNConfig, usually used for debugging purposes
+func (a *ATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if a.context != nil {
+ s1 = ",[" + fmt.Sprint(a.context) + "]"
+ }
+
+ if a.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(a.semanticContext)
+ }
+
+ if a.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LHash() int {
+ var f int
+ if a.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, a.context.Hash())
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, a.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
+ var otherT, ok = other.(*ATNConfig)
+ if !ok {
+ return false
+ } else if a == otherT {
+ return true
+ } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ switch {
+ case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
+ return true
+ case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
+ if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
+ return false
+ }
+ default:
+		return false // One, but not both, is nil
+ }
+
+ return a.PEquals(otherT)
+}
+
+func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
new file mode 100644
index 0000000000..52dbaf8064
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
@@ -0,0 +1,301 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type ATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two ATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
+
+	// configs holds the added elements that did not match an existing key in configLookup
+ configs []*ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the ATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from this set.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+	// sets and those must not change. If not read-only, protect the other fields - conflictingAlts
+	// in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
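+
+// As a small in-package sketch of the merge behavior above (cfg1 and cfg2 are
+// assumed to share state, alt, and semantic context but differ in prediction
+// context), the second Add merges contexts rather than appending a duplicate:
+//
+//	set := NewATNConfigSet(false)
+//	set.Add(cfg1, nil) // stored as a new entry
+//	set.Add(cfg2, nil) // merged into cfg1's entry via a context merge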
+
+// GetStates returns the set of states represented by all configurations in this config set
+func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator and Hash() provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *ATNConfigSet) GetPredicates() []SemanticContext {
+ predicates := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ predicates = append(predicates, c)
+ }
+ }
+
+ return predicates
+}
+
+func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+	// An empty lookup indicates that no optimization is possible
+ if b.configLookup == nil || b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare reports whether two config sets hold equal configs: they are only equal if the configs are in the
+// same order and their Equals function returns true. Java uses ArrayList.equals(), which requires the same order.
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+ for i := 0; i < len(b.configs); i++ {
+ if !b.configs[i].Equals(bs.configs[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*ATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*ATNConfigSet)
+ var eca bool
+ switch {
+ case b.conflictingAlts == nil && other2.conflictingAlts == nil:
+ eca = true
+ case b.conflictingAlts != nil && other2.conflictingAlts != nil:
+ eca = b.conflictingAlts.equals(other2.conflictingAlts)
+ }
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ eca &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *ATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *ATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
+ if b.readOnly {
+ panic("not implemented for read-only sets")
+ }
+ if b.configLookup == nil {
+ return false
+ }
+ return b.configLookup.Contains(item)
+}
+
+func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
+ return b.Contains(item)
+}
+
+func (b *ATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+ b.configs = make([]*ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
+}
+
+func (b *ATNConfigSet) String() string {
+
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
+// for use in lexers.
+func NewOrderedATNConfigSet() *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
+ fullCtx: false,
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
new file mode 100644
index 0000000000..bdb30b3622
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "errors"
+
+var defaultATNDeserializationOptions = ATNDeserializationOptions{readOnly: true, verifyATN: true, generateRuleBypassTransitions: false}
+
+type ATNDeserializationOptions struct {
+ readOnly bool
+ verifyATN bool
+ generateRuleBypassTransitions bool
+}
+
+func (opts *ATNDeserializationOptions) ReadOnly() bool {
+ return opts.readOnly
+}
+
+func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.readOnly = readOnly
+}
+
+func (opts *ATNDeserializationOptions) VerifyATN() bool {
+ return opts.verifyATN
+}
+
+func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.verifyATN = verifyATN
+}
+
+func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
+ return opts.generateRuleBypassTransitions
+}
+
+func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.generateRuleBypassTransitions = generateRuleBypassTransitions
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
+ return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
+}
+
+func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
+ o := new(ATNDeserializationOptions)
+ if other != nil {
+ *o = *other
+ o.readOnly = false
+ }
+ return o
+}
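+
+// As a sketch, a caller that wants rule bypass transitions generated during
+// deserialization (serializedATN is assumed to come from generated code) might
+// configure things like so:
+//
+//	opts := DefaultATNDeserializationOptions()
+//	opts.SetGenerateRuleBypassTransitions(true)
+//	atn := NewATNDeserializer(opts).Deserialize(serializedATN)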
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
new file mode 100644
index 0000000000..2dcb9ae11b
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
@@ -0,0 +1,684 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+const serializedVersion = 4
+
+type loopEndStateIntPair struct {
+ item0 *LoopEndState
+ item1 int
+}
+
+type blockStartStateIntPair struct {
+ item0 BlockStartState
+ item1 int
+}
+
+type ATNDeserializer struct {
+ options *ATNDeserializationOptions
+ data []int32
+ pos int
+}
+
+func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
+ if options == nil {
+ options = &defaultATNDeserializationOptions
+ }
+
+ return &ATNDeserializer{options: options}
+}
+
+//goland:noinspection GoUnusedFunction
+func stringInSlice(a string, list []string) int {
+ for i, b := range list {
+ if b == a {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
+ a.data = data
+ a.pos = 0
+ a.checkVersion()
+
+ atn := a.readATN()
+
+ a.readStates(atn)
+ a.readRules(atn)
+ a.readModes(atn)
+
+ sets := a.readSets(atn, nil)
+
+ a.readEdges(atn, sets)
+ a.readDecisions(atn)
+ a.readLexerActions(atn)
+ a.markPrecedenceDecisions(atn)
+ a.verifyATN(atn)
+
+ if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
+ a.generateRuleBypassTransitions(atn)
+ // Re-verify after modification
+ a.verifyATN(atn)
+ }
+
+ return atn
+
+}
+
+func (a *ATNDeserializer) checkVersion() {
+ version := a.readInt()
+
+ if version != serializedVersion {
+ panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
+ }
+}
+
+func (a *ATNDeserializer) readATN() *ATN {
+ grammarType := a.readInt()
+ maxTokenType := a.readInt()
+
+ return NewATN(grammarType, maxTokenType)
+}
+
+func (a *ATNDeserializer) readStates(atn *ATN) {
+ nstates := a.readInt()
+
+ // Allocate worst case size.
+ loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
+ endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
+
+ // Preallocate states slice.
+ atn.states = make([]ATNState, 0, nstates)
+
+ for i := 0; i < nstates; i++ {
+ stype := a.readInt()
+
+ // Ignore bad types of states
+ if stype == ATNStateInvalidType {
+ atn.addState(nil)
+ continue
+ }
+
+ ruleIndex := a.readInt()
+
+ s := a.stateFactory(stype, ruleIndex)
+
+ if stype == ATNStateLoopEnd {
+ loopBackStateNumber := a.readInt()
+
+ loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
+ } else if s2, ok := s.(BlockStartState); ok {
+ endStateNumber := a.readInt()
+
+ endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
+ }
+
+ atn.addState(s)
+ }
+
+ // Delay the assignment of loop back and end states until we know all the state
+ // instances have been initialized
+ for _, pair := range loopBackStateNumbers {
+ pair.item0.loopBackState = atn.states[pair.item1]
+ }
+
+ for _, pair := range endStateNumbers {
+ pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
+ }
+
+ numNonGreedyStates := a.readInt()
+ for j := 0; j < numNonGreedyStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(DecisionState).setNonGreedy(true)
+ }
+
+ numPrecedenceStates := a.readInt()
+ for j := 0; j < numPrecedenceStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
+ }
+}
+
+func (a *ATNDeserializer) readRules(atn *ATN) {
+ nrules := a.readInt()
+
+ if atn.grammarType == ATNTypeLexer {
+ atn.ruleToTokenType = make([]int, nrules)
+ }
+
+ atn.ruleToStartState = make([]*RuleStartState, nrules)
+
+ for i := range atn.ruleToStartState {
+ s := a.readInt()
+ startState := atn.states[s].(*RuleStartState)
+
+ atn.ruleToStartState[i] = startState
+
+ if atn.grammarType == ATNTypeLexer {
+ tokenType := a.readInt()
+
+ atn.ruleToTokenType[i] = tokenType
+ }
+ }
+
+ atn.ruleToStopState = make([]*RuleStopState, nrules)
+
+ for _, state := range atn.states {
+ if s2, ok := state.(*RuleStopState); ok {
+ atn.ruleToStopState[s2.ruleIndex] = s2
+ atn.ruleToStartState[s2.ruleIndex].stopState = s2
+ }
+ }
+}
+
+func (a *ATNDeserializer) readModes(atn *ATN) {
+ nmodes := a.readInt()
+ atn.modeToStartState = make([]*TokensStartState, nmodes)
+
+ for i := range atn.modeToStartState {
+ s := a.readInt()
+
+ atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
+ }
+}
+
+func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
+ m := a.readInt()
+
+ // Preallocate the needed capacity.
+ if cap(sets)-len(sets) < m {
+ isets := make([]*IntervalSet, len(sets), len(sets)+m)
+ copy(isets, sets)
+ sets = isets
+ }
+
+ for i := 0; i < m; i++ {
+ iset := NewIntervalSet()
+
+ sets = append(sets, iset)
+
+ n := a.readInt()
+ containsEOF := a.readInt()
+
+ if containsEOF != 0 {
+ iset.addOne(-1)
+ }
+
+ for j := 0; j < n; j++ {
+ i1 := a.readInt()
+ i2 := a.readInt()
+
+ iset.addRange(i1, i2)
+ }
+ }
+
+ return sets
+}
+
+func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
+ nedges := a.readInt()
+
+ for i := 0; i < nedges; i++ {
+ var (
+ src = a.readInt()
+ trg = a.readInt()
+ ttype = a.readInt()
+ arg1 = a.readInt()
+ arg2 = a.readInt()
+ arg3 = a.readInt()
+ trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
+ srcState = atn.states[src]
+ )
+
+ srcState.AddTransition(trans, -1)
+ }
+
+ // Edges for rule stop states can be derived, so they are not serialized
+ for _, state := range atn.states {
+ for _, t := range state.GetTransitions() {
+ var rt, ok = t.(*RuleTransition)
+
+ if !ok {
+ continue
+ }
+
+ outermostPrecedenceReturn := -1
+
+ if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
+ if rt.precedence == 0 {
+ outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
+ }
+ }
+
+ trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
+
+ atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
+ }
+ }
+
+ for _, state := range atn.states {
+ if s2, ok := state.(BlockStartState); ok {
+ // We need to know the end state to set its start state
+ if s2.getEndState() == nil {
+ panic("IllegalState")
+ }
+
+ // Block end states can only be associated to a single block start state
+ if s2.getEndState().startState != nil {
+ panic("IllegalState")
+ }
+
+ s2.getEndState().startState = state
+ }
+
+ if s2, ok := state.(*PlusLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
+ t2.loopBackState = state
+ }
+ }
+ } else if s2, ok := state.(*StarLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
+ t2.loopBackState = state
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) readDecisions(atn *ATN) {
+ ndecisions := a.readInt()
+
+ for i := 0; i < ndecisions; i++ {
+ s := a.readInt()
+ decState := atn.states[s].(DecisionState)
+
+ atn.DecisionToState = append(atn.DecisionToState, decState)
+ decState.setDecision(i)
+ }
+}
+
+func (a *ATNDeserializer) readLexerActions(atn *ATN) {
+ if atn.grammarType == ATNTypeLexer {
+ count := a.readInt()
+
+ atn.lexerActions = make([]LexerAction, count)
+
+ for i := range atn.lexerActions {
+ actionType := a.readInt()
+ data1 := a.readInt()
+ data2 := a.readInt()
+ atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
+ }
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
+ count := len(atn.ruleToStartState)
+
+ for i := 0; i < count; i++ {
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
+ }
+
+ for i := 0; i < count; i++ {
+ a.generateRuleBypassTransition(atn, i)
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
+ bypassStart := NewBasicBlockStartState()
+
+ bypassStart.ruleIndex = idx
+ atn.addState(bypassStart)
+
+ bypassStop := NewBlockEndState()
+
+ bypassStop.ruleIndex = idx
+ atn.addState(bypassStop)
+
+ bypassStart.endState = bypassStop
+
+ atn.defineDecisionState(&bypassStart.BaseDecisionState)
+
+ bypassStop.startState = bypassStart
+
+ var excludeTransition Transition
+ var endState ATNState
+
+ if atn.ruleToStartState[idx].isPrecedenceRule {
+ // Wrap from the beginning of the rule to the StarLoopEntryState
+ endState = nil
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if a.stateIsEndStateFor(state, idx) != nil {
+ endState = state
+ excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+ break
+ }
+ }
+
+ if excludeTransition == nil {
+ panic("Couldn't identify final state of the precedence rule prefix section.")
+ }
+ } else {
+ endState = atn.ruleToStopState[idx]
+ }
+
+ // All non-excluded transitions that currently target end state need to target
+ // blockEnd instead
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ transition := state.GetTransitions()[j]
+
+ if transition == excludeTransition {
+ continue
+ }
+
+ if transition.getTarget() == endState {
+ transition.setTarget(bypassStop)
+ }
+ }
+ }
+
+ // All transitions leaving the rule start state need to leave blockStart instead
+ ruleToStartState := atn.ruleToStartState[idx]
+ count := len(ruleToStartState.GetTransitions())
+
+	for count > 0 {
+		// Move the last remaining transition from the rule start state onto bypassStart
+		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
+		count--
+	}
+
+ // Link the new states
+ atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+ bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+ MatchState := NewBasicState()
+
+ atn.addState(MatchState)
+ MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+ bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+ if state.GetRuleIndex() != idx {
+ return nil
+ }
+
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ return nil
+ }
+
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+ return nil
+ }
+
+ var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+ return state
+ }
+
+ return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+ for _, state := range atn.states {
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ continue
+ }
+
+ // We analyze the [ATN] to determine if an ATN decision state is the
+ // decision for the closure block that determines whether a
+ // precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
+ var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if s3.epsilonOnlyTransitions && ok2 {
+ state.(*StarLoopEntryState).precedenceRuleDecision = true
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) verifyATN(atn *ATN) {
+ if !a.options.VerifyATN() {
+ return
+ }
+
+ // Verify assumptions
+ for _, state := range atn.states {
+ if state == nil {
+ continue
+ }
+
+ a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
+
+ switch s2 := state.(type) {
+ case *PlusBlockStartState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *StarLoopEntryState:
+ a.checkCondition(s2.loopBackState != nil, "")
+ a.checkCondition(len(s2.GetTransitions()) == 2, "")
+
+ switch s2.transitions[0].getTarget().(type) {
+ case *StarBlockStartState:
+ _, ok := s2.transitions[1].getTarget().(*LoopEndState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(!s2.nonGreedy, "")
+
+ case *LoopEndState:
+ var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(s2.nonGreedy, "")
+
+ default:
+ panic("IllegalState")
+ }
+
+ case *StarLoopbackState:
+ a.checkCondition(len(state.GetTransitions()) == 1, "")
+
+ var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
+
+ a.checkCondition(ok, "")
+
+ case *LoopEndState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *RuleStartState:
+ a.checkCondition(s2.stopState != nil, "")
+
+ case BlockStartState:
+ a.checkCondition(s2.getEndState() != nil, "")
+
+ case *BlockEndState:
+ a.checkCondition(s2.startState != nil, "")
+
+ case DecisionState:
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
+
+ default:
+ var _, ok = s2.(*RuleStopState)
+
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
+ }
+ }
+}
+
+func (a *ATNDeserializer) checkCondition(condition bool, message string) {
+ if !condition {
+ if message == "" {
+ message = "IllegalState"
+ }
+
+ panic(message)
+ }
+}
+
+func (a *ATNDeserializer) readInt() int {
+ v := a.data[a.pos]
+
+ a.pos++
+
+ return int(v) // data is 32 bits but int is at least that big
+}
+
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+ target := atn.states[trg]
+
+ switch typeIndex {
+ case TransitionEPSILON:
+ return NewEpsilonTransition(target, -1)
+
+ case TransitionRANGE:
+ if arg3 != 0 {
+ return NewRangeTransition(target, TokenEOF, arg2)
+ }
+
+ return NewRangeTransition(target, arg1, arg2)
+
+ case TransitionRULE:
+ return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
+
+ case TransitionPREDICATE:
+ return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionPRECEDENCE:
+ return NewPrecedencePredicateTransition(target, arg1)
+
+ case TransitionATOM:
+ if arg3 != 0 {
+ return NewAtomTransition(target, TokenEOF)
+ }
+
+ return NewAtomTransition(target, arg1)
+
+ case TransitionACTION:
+ return NewActionTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionSET:
+ return NewSetTransition(target, sets[arg1])
+
+ case TransitionNOTSET:
+ return NewNotSetTransition(target, sets[arg1])
+
+ case TransitionWILDCARD:
+ return NewWildcardTransition(target)
+ }
+
+ panic("The specified transition type is not valid.")
+}
+
+func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
+ var s ATNState
+
+ switch typeIndex {
+ case ATNStateInvalidType:
+ return nil
+
+ case ATNStateBasic:
+ s = NewBasicState()
+
+ case ATNStateRuleStart:
+ s = NewRuleStartState()
+
+ case ATNStateBlockStart:
+ s = NewBasicBlockStartState()
+
+ case ATNStatePlusBlockStart:
+ s = NewPlusBlockStartState()
+
+ case ATNStateStarBlockStart:
+ s = NewStarBlockStartState()
+
+ case ATNStateTokenStart:
+ s = NewTokensStartState()
+
+ case ATNStateRuleStop:
+ s = NewRuleStopState()
+
+ case ATNStateBlockEnd:
+ s = NewBlockEndState()
+
+ case ATNStateStarLoopBack:
+ s = NewStarLoopbackState()
+
+ case ATNStateStarLoopEntry:
+ s = NewStarLoopEntryState()
+
+ case ATNStatePlusLoopBack:
+ s = NewPlusLoopbackState()
+
+ case ATNStateLoopEnd:
+ s = NewLoopEndState()
+
+ default:
+ panic(fmt.Sprintf("state type %d is invalid", typeIndex))
+ }
+
+ s.SetRuleIndex(ruleIndex)
+
+ return s
+}
+
+func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
+ switch typeIndex {
+ case LexerActionTypeChannel:
+ return NewLexerChannelAction(data1)
+
+ case LexerActionTypeCustom:
+ return NewLexerCustomAction(data1, data2)
+
+ case LexerActionTypeMode:
+ return NewLexerModeAction(data1)
+
+ case LexerActionTypeMore:
+ return LexerMoreActionINSTANCE
+
+ case LexerActionTypePopMode:
+ return LexerPopModeActionINSTANCE
+
+ case LexerActionTypePushMode:
+ return NewLexerPushModeAction(data1)
+
+ case LexerActionTypeSkip:
+ return LexerSkipActionINSTANCE
+
+ case LexerActionTypeType:
+ return NewLexerTypeAction(data1)
+
+ default:
+ panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
new file mode 100644
index 0000000000..afe6c9f809
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
+ visited := NewVisitRecord()
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
new file mode 100644
index 0000000000..2ae5807cdb
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
@@ -0,0 +1,461 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ Hash() int
+ Equals(Collectable[ATNState]) bool
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) Hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+		_, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
+ as.epsilonOnlyTransitions = false
+ }
+
+ // TODO: Check code for already present compared to the Java equivalent
+ //alreadyPresent := false
+ //for _, t := range as.transitions {
+ // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
+ // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
+ // alreadyPresent = true
+ // break
+ // }
+ // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
+ // alreadyPresent = true
+ // break
+ // }
+ //}
+ //if !alreadyPresent {
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+ //} else {
+ // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
+ //}
+}
+
+type BasicState struct {
+ BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ return &BasicState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ }
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ }
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ },
+ }
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ return &BasicBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &BasicBlockStartState{}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ return &BlockEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockEnd,
+ },
+ startState: nil,
+ }
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ return &RuleStopState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStop,
+ },
+ }
+}
+
+type RuleStartState struct {
+ BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ return &RuleStartState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStart,
+ },
+ }
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to loop back to the start of the block, and one to exit.
+type PlusLoopbackState struct {
+ BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ return &PlusLoopbackState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusLoopBack,
+ },
+ },
+ }
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// so it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ return &PlusBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &PlusBlockStartState{}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ return &StarBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &StarBlockStartState{}
+
+type StarLoopbackState struct {
+ BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ return &StarLoopbackState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopBack,
+ },
+ }
+}
+
+type StarLoopEntryState struct {
+ BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+	// precedenceRuleDecision (false by default) indicates whether this state can benefit from a precedence DFA during SLL decision making.
+ return &StarLoopEntryState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopEntry,
+ },
+ },
+ }
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ return &LoopEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateLoopEnd,
+ },
+ }
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ return &TokensStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateTokenStart,
+ },
+ },
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
new file mode 100644
index 0000000000..3a515a145f
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The following constants represent the type of recognizer an ATN applies to.
+const (
+ ATNTypeLexer = 0
+ ATNTypeParser = 1
+)
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
new file mode 100644
index 0000000000..bd8127b6b5
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
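+// CharStream is an input stream of characters (code points) that can also
+// hand back substrings of the input. InputStream is the usual in-memory
+// implementation; a minimal sketch (GetText bounds are inclusive):
+//
+//	var cs CharStream = NewInputStream("some text")
+//	_ = cs.GetText(0, 3) // "some"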
+type CharStream interface {
+ IntStream
+ GetText(int, int) string
+ GetTextFromTokens(start, end Token) string
+ GetTextFromInterval(Interval) string
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
new file mode 100644
index 0000000000..1bb0314ea0
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// TokenFactory creates CommonToken objects.
+type TokenFactory interface {
+ Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
+}
+
+// CommonTokenFactory is the default TokenFactory implementation.
+type CommonTokenFactory struct {
+ // copyText indicates whether CommonToken.setText should be called after
+ // constructing tokens to explicitly set the text. This is useful for cases
+ // where the input stream might not be able to provide arbitrary substrings of
+ // text from the input after the lexer creates a token (e.g. the
+ // implementation of CharStream.GetText in UnbufferedCharStream panics an
+ // UnsupportedOperationException). Explicitly setting the token text allows
+ // Token.GetText to be called at any time regardless of the input stream
+ // implementation.
+ //
+ // The default value is false to avoid the performance and memory overhead of
+ // copying text for every token unless explicitly requested.
+ copyText bool
+}
+
+func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
+ return &CommonTokenFactory{copyText: copyText}
+}
+
+// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
+// explicitly copy token text when constructing tokens.
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
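+// If the underlying input stream cannot provide substrings after lexing (for
+// example, an unbuffered stream), a copying factory can be constructed
+// instead; a minimal sketch (attaching the factory to a lexer is elided):
+//
+//	factory := NewCommonTokenFactory(true) // eagerly copy text into each token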
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+ t := NewCommonToken(source, ttype, channel, start, stop)
+
+ t.line = line
+ t.column = column
+
+ if text != "" {
+ t.SetText(text)
+ } else if c.copyText && source.charStream != nil {
+ t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+ }
+
+ return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+ t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+ t.SetText(text)
+
+ return t
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
new file mode 100644
index 0000000000..b75da9df08
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -0,0 +1,450 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens
+// down to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+ channel int
+
+ // fetchedEOF indicates whether the Token.EOF token has been fetched from
+ // tokenSource and added to tokens. This field improves performance for the
+ // following cases:
+ //
+	// consume: The lookahead check in consume to prevent consuming the EOF symbol is
+ // optimized by checking the values of fetchedEOF and p instead of calling LA.
+ //
+	// fetch: The check to prevent adding multiple EOF symbols into tokens is
+	// trivial with this field.
+ fetchedEOF bool
+
+ // index into [tokens] of the current token (next token to consume).
+ // tokens[p] should be LT(1). It is set to -1 when the stream is first
+ // constructed or when SetTokenSource is called, indicating that the first token
+ // has not yet been fetched from the token source. For additional information,
+ // see the documentation of [IntStream] for a description of initializing methods.
+ index int
+
+	// tokenSource is the [TokenSource] from which tokens for this stream are
+ // fetched.
+ tokenSource TokenSource
+
+ // tokens contains all tokens fetched from the token source. The list is considered a
+ // complete view of the input once fetchedEOF is set to true.
+ tokens []Token
+}
+
+// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
+// tokens and will pull tokens from the given lexer channel.
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+ return &CommonTokenStream{
+ channel: channel,
+ index: -1,
+ tokenSource: lexer,
+ tokens: make([]Token, 0),
+ }
+}
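+// A typical wiring sketch; MyLexer and MyParser stand in for generated types
+// and are not part of this runtime:
+//
+//	input := NewInputStream("a + b")
+//	lexer := NewMyLexer(input)
+//	tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)
+//	parser := NewMyParser(tokens)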
+
+// GetAllTokens returns all tokens currently pulled from the token source.
+func (c *CommonTokenStream) GetAllTokens() []Token {
+ return c.tokens
+}
+
+func (c *CommonTokenStream) Mark() int {
+ return 0
+}
+
+func (c *CommonTokenStream) Release(_ int) {}
+
+func (c *CommonTokenStream) Reset() {
+ c.fetchedEOF = false
+ c.tokens = make([]Token, 0)
+ c.Seek(0)
+}
+
+func (c *CommonTokenStream) Seek(index int) {
+ c.lazyInit()
+ c.index = c.adjustSeekIndex(index)
+}
+
+func (c *CommonTokenStream) Get(index int) Token {
+ c.lazyInit()
+
+ return c.tokens[index]
+}
+
+func (c *CommonTokenStream) Consume() {
+ SkipEOFCheck := false
+
+ if c.index >= 0 {
+ if c.fetchedEOF {
+			// The last token in tokens is EOF. Skip the check if p indexes any fetched
+			// token except the last.
+ SkipEOFCheck = c.index < len(c.tokens)-1
+ } else {
+ // No EOF token in tokens. Skip the check if p indexes a fetched token.
+ SkipEOFCheck = c.index < len(c.tokens)
+ }
+ } else {
+ // Not yet initialized
+ SkipEOFCheck = false
+ }
+
+ if !SkipEOFCheck && c.LA(1) == TokenEOF {
+ panic("cannot consume EOF")
+ }
+
+ if c.Sync(c.index + 1) {
+ c.index = c.adjustSeekIndex(c.index + 1)
+ }
+}
+
+// Sync makes sure index i in tokens has a token and returns true if a token is
+// located at index i and otherwise false.
+func (c *CommonTokenStream) Sync(i int) bool {
+ n := i - len(c.tokens) + 1 // How many more elements do we need?
+
+ if n > 0 {
+ fetched := c.fetch(n)
+ return fetched >= n
+ }
+
+ return true
+}
+
+// fetch adds n elements to buffer and returns the actual number of elements
+// added to the buffer.
+func (c *CommonTokenStream) fetch(n int) int {
+ if c.fetchedEOF {
+ return 0
+ }
+
+ for i := 0; i < n; i++ {
+ t := c.tokenSource.NextToken()
+
+ t.SetTokenIndex(len(c.tokens))
+ c.tokens = append(c.tokens, t)
+
+ if t.GetTokenType() == TokenEOF {
+ c.fetchedEOF = true
+
+ return i + 1
+ }
+ }
+
+ return n
+}
+
+// GetTokens gets all tokens from start up to, but not including, stop,
+// optionally restricted to the token types in types (pass nil for no filter).
+func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
+ if start < 0 || stop < 0 {
+ return nil
+ }
+
+ c.lazyInit()
+
+ subset := make([]Token, 0)
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ for i := start; i < stop; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ if types == nil || types.contains(t.GetTokenType()) {
+ subset = append(subset, t)
+ }
+ }
+
+ return subset
+}
+
+func (c *CommonTokenStream) LA(i int) int {
+ return c.LT(i).GetTokenType()
+}
+
+func (c *CommonTokenStream) lazyInit() {
+ if c.index == -1 {
+ c.setup()
+ }
+}
+
+func (c *CommonTokenStream) setup() {
+ c.Sync(0)
+ c.index = c.adjustSeekIndex(0)
+}
+
+func (c *CommonTokenStream) GetTokenSource() TokenSource {
+ return c.tokenSource
+}
+
+// SetTokenSource resets the c token stream by setting its token source.
+func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
+ c.tokenSource = tokenSource
+ c.tokens = make([]Token, 0)
+ c.index = -1
+ c.fetchedEOF = false
+}
+
+// NextTokenOnChannel returns the index of the next token on channel given a
+// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
+// no tokens on channel between 'i' and [TokenEOF].
+func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
+ c.Sync(i)
+
+ if i >= len(c.tokens) {
+ return -1
+ }
+
+ token := c.tokens[i]
+
+ for token.GetChannel() != c.channel {
+ if token.GetTokenType() == TokenEOF {
+ return -1
+ }
+
+ i++
+ c.Sync(i)
+ token = c.tokens[i]
+ }
+
+ return i
+}
+
+// previousTokenOnChannel returns the index of the previous token on channel
+// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
+// there are no tokens on channel between i and 0.
+func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
+ for i >= 0 && c.tokens[i].GetChannel() != channel {
+ i--
+ }
+
+ return i
+}
+
+// GetHiddenTokensToRight collects all tokens on a specified channel to the
+// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
+// or EOF. If channel is -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
+ from := tokenIndex + 1
+
+ // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
+ var to int
+
+ if nextOnChannel == -1 {
+ to = len(c.tokens) - 1
+ } else {
+ to = nextOnChannel
+ }
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// GetHiddenTokensToLeft collects all tokens on channel to the left of the
+// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
+// -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
+
+ if prevOnChannel == tokenIndex-1 {
+ return nil
+ }
+
+ // If there are none on channel to the left and prevOnChannel == -1 then from = 0
+ from := prevOnChannel + 1
+ to := tokenIndex - 1
+
+ return c.filterForChannel(from, to, channel)
+}
+
+func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
+ hidden := make([]Token, 0)
+
+ for i := left; i < right+1; i++ {
+ t := c.tokens[i]
+
+ if channel == -1 {
+ if t.GetChannel() != LexerDefaultTokenChannel {
+ hidden = append(hidden, t)
+ }
+ } else if t.GetChannel() == channel {
+ hidden = append(hidden, t)
+ }
+ }
+
+ if len(hidden) == 0 {
+ return nil
+ }
+
+ return hidden
+}
+
+func (c *CommonTokenStream) GetSourceName() string {
+ return c.tokenSource.GetSourceName()
+}
+
+func (c *CommonTokenStream) Size() int {
+ return len(c.tokens)
+}
+
+func (c *CommonTokenStream) Index() int {
+ return c.index
+}
+
+func (c *CommonTokenStream) GetAllText() string {
+ c.Fill()
+ return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
+}
+
+func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
+ if start == nil || end == nil {
+ return ""
+ }
+
+ return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
+}
+
+func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
+ return c.GetTextFromInterval(interval.GetSourceInterval())
+}
+
+func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
+ c.lazyInit()
+ c.Sync(interval.Stop)
+
+ start := interval.Start
+ stop := interval.Stop
+
+ if start < 0 || stop < 0 {
+ return ""
+ }
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ s := ""
+
+ for i := start; i < stop+1; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ s += t.GetText()
+ }
+
+ return s
+}
+
+// Fill gets all tokens from the lexer until EOF.
+func (c *CommonTokenStream) Fill() {
+ c.lazyInit()
+
+ for c.fetch(1000) == 1000 {
+ continue
+ }
+}
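+// Fill is convenient for pure tokenization without a parser; a sketch,
+// assuming lexer is any Lexer implementation:
+//
+//	tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)
+//	tokens.Fill()
+//	for _, t := range tokens.GetAllTokens() {
+//		fmt.Println(t.GetText())
+//	}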
+
+func (c *CommonTokenStream) adjustSeekIndex(i int) int {
+ return c.NextTokenOnChannel(i, c.channel)
+}
+
+func (c *CommonTokenStream) LB(k int) Token {
+ if k == 0 || c.index-k < 0 {
+ return nil
+ }
+
+ i := c.index
+ n := 1
+
+ // Find k good tokens looking backward
+ for n <= k {
+ // Skip off-channel tokens
+ i = c.previousTokenOnChannel(i-1, c.channel)
+ n++
+ }
+
+ if i < 0 {
+ return nil
+ }
+
+ return c.tokens[i]
+}
+
+func (c *CommonTokenStream) LT(k int) Token {
+ c.lazyInit()
+
+ if k == 0 {
+ return nil
+ }
+
+ if k < 0 {
+ return c.LB(-k)
+ }
+
+ i := c.index
+ n := 1 // We know tokens[n] is valid
+
+ // Find k good tokens
+ for n < k {
+ // Skip off-channel tokens, but make sure to not look past EOF
+ if c.Sync(i + 1) {
+ i = c.NextTokenOnChannel(i+1, c.channel)
+ }
+
+ n++
+ }
+
+ return c.tokens[i]
+}
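+// For example, LT(1) is the next token to be consumed, LT(2) the one after
+// that, and LT(-1) the most recently consumed on-channel token:
+//
+//	next := tokens.LT(1)
+//	prev := tokens.LT(-1)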
+
+// getNumberOfOnChannelTokens returns the number of tokens on the configured channel, counting EOF once.
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
+ var n int
+
+ c.Fill()
+
+ for i := 0; i < len(c.tokens); i++ {
+ t := c.tokens[i]
+
+ if t.GetChannel() == c.channel {
+ n++
+ }
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+ }
+
+ return n
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/comparators.go
new file mode 100644
index 0000000000..7467e9b43d
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/comparators.go
@@ -0,0 +1,150 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+// This file contains all the implementations of custom comparators used for generic collections when the
+// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
+// put the comparators in the source files for the structs themselves, but given that the organization of this code is
+// loosely based upon the Java code, I found it confusing trying to find out which comparator was where and used by
+// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
+// collections, requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
+// all the comparators here makes it much easier to see which implementation of hash and equals is used by which collection.
+// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.
+
+// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
+// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
+// type safety and avoid having to implement this for every type that we want to perform comparison on.
+//
+// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+type ObjEqComparator[T Collectable[T]] struct{}
+
+var (
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[*ATNConfig]{}
+
+ // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
+ aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
+ dfaStateEqInst = &ObjEqComparator[*DFAState]{}
+ semctxEqInst = &ObjEqComparator[SemanticContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
+ pContextEqInst = &ObjEqComparator[*PredictionContext]{}
+)
+
+// Equals2 delegates to the Equals() method of type T
+func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
+ return o1.Equals(o2)
+}
+
+// Hash1 delegates to the Hash() method of type T
+func (c *ObjEqComparator[T]) Hash1(o T) int {
+
+ return o.Hash()
+}
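+// For example, the DFA state cache in dfa.go is built on one of the
+// comparator instances declared below:
+//
+//	NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")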
+
+type SemCComparator[T Collectable[T]] struct{}
+
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type ATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup.
+func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
+type ATNAltConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetContext().Equals(o2.GetContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup.
+func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, o.GetState().GetStateNumber())
+ h = murmurUpdate(h, o.GetContext().Hash())
+ return murmurFinish(h, 2)
+}
+
+// BaseATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type BaseATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup, but in fact it just
+// delegates to the standard Hash() method of the ATNConfig type.
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+ return o.Hash()
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/configuration.go
new file mode 100644
index 0000000000..c2b724514d
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/configuration.go
@@ -0,0 +1,214 @@
+package antlr
+
+type runtimeConfiguration struct {
+ statsTraceStacks bool
+ lexerATNSimulatorDebug bool
+ lexerATNSimulatorDFADebug bool
+ parserATNSimulatorDebug bool
+ parserATNSimulatorTraceATNSim bool
+ parserATNSimulatorDFADebug bool
+ parserATNSimulatorRetryDebug bool
+ lRLoopEntryBranchOpt bool
+ memoryManager bool
+}
+
+// Global runtime configuration
+var runtimeConfig = runtimeConfiguration{
+ lRLoopEntryBranchOpt: true,
+}
+
+type runtimeOption func(*runtimeConfiguration) error
+
+// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
+// It uses the functional options pattern for go. This is a package global function as it operates on the runtime
+// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
+// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
+// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
+// memory allocation type used by the runtime, such as whether to use sync.Pool or normal Go allocation.
+//
+// The options are applied in the order they are passed in, so the last option will override any previous options.
+//
+// For example, if you want to turn on the collection create point stack flag to true, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// If you want to turn it off, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func ConfigureRuntime(options ...runtimeOption) error {
+ for _, option := range options {
+ err := option(&runtimeConfig)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
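+// Multiple options may be combined in a single call; for example, to enable
+// lexer simulator debugging together with stats stack traces:
+//
+//	err := antlr.ConfigureRuntime(
+//		antlr.WithLexerATNSimulatorDebug(true),
+//		antlr.WithStatsTraceStacks(true),
+//	)
+//	if err != nil {
+//		panic(err)
+//	}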
+
+// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
+// certain structs, such as collections, or the use point of certain methods such as Put().
+// Because this can be expensive, it is turned off by default. However, it
+// can be useful to track down exactly where memory is being created and used.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func WithStatsTraceStacks(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.statsTraceStacks = trace
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
+func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
+func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
+func WithParserATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
+// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
+func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorTraceATNSim = trace
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
+func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Only useful to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
+func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorRetryDebug = debug
+ return nil
+ }
+}
+
+// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
+// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Passing false turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
+//
+// Note that the default is to use this optimization.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
+func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lRLoopEntryBranchOpt = off
+ return nil
+ }
+}
+
+// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
+// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
+// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
+// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
+// grammar, this may help make it more practical.
+//
+// Note that the default is to use normal Go memory allocation and not pool memory.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
+//
+// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
+// and should remember to nil out any references to the parser or lexer when you are done with them.
+func WithMemoryManager(use bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.memoryManager = use
+ return nil
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa.go
new file mode 100644
index 0000000000..6b63eb1589
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
+// reach and the transitions between them.
+type DFA struct {
+ // atnStartState is the ATN state in which this was created
+ atnStartState DecisionState
+
+ decision int
+
+	// states holds all the DFA states. Use Map to get the old state back; Set can only
+	// indicate whether it is there. Go maps handle key hash collisions internally and are very
+	// good, but the DFAState is an object and can't be used directly as the key as it can in, say, Java
+	// and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
+	// to see if they really are the same object. Hence, we have our own map storage.
+ //
+ states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
+
+ numstates int
+
+ s0 *DFAState
+
+ // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
+ // True if the DFA is for a precedence decision and false otherwise.
+ precedenceDfa bool
+}
+
+func NewDFA(atnStartState DecisionState, decision int) *DFA {
+ dfa := &DFA{
+ atnStartState: atnStartState,
+ decision: decision,
+ states: nil, // Lazy initialize
+ }
+ if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
+ dfa.precedenceDfa = true
+ dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
+ dfa.s0.isAcceptState = false
+ dfa.s0.requiresFullContext = false
+ }
+ return dfa
+}
+
+// getPrecedenceStartState gets the start state for the current precedence and
+// returns the start state corresponding to the specified precedence if a start
+// state exists for the specified precedence and nil otherwise. d must be a
+// precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ // s0.edges is never nil for a precedence DFA
+ if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
+ return nil
+ }
+
+ return d.getS0().getIthEdge(precedence)
+}
+
+// setPrecedenceStartState sets the start state for the current precedence. d
+// must be a precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ if precedence < 0 {
+ return
+ }
+
+ // Synchronization on s0 here is ok. When the DFA is turned into a
+ // precedence DFA, s0 will be initialized once and not updated again. s0.edges
+ // is never nil for a precedence DFA.
+ s0 := d.getS0()
+ if precedence >= s0.numEdges() {
+ edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
+ s0.setEdges(edges)
+ d.setS0(s0)
+ }
+
+ s0.setIthEdge(precedence, startState)
+}
+
+func (d *DFA) getPrecedenceDfa() bool {
+ return d.precedenceDfa
+}
+
+// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
+// from the current DFA configuration, then d.states is cleared, the initial
+// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
+// store the start states for individual precedence values if precedenceDfa is
+// true or nil otherwise, and d.precedenceDfa is updated.
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
+ if d.getPrecedenceDfa() != precedenceDfa {
+ d.states = nil // Lazy initialize
+ d.numstates = 0
+
+ if precedenceDfa {
+ precedenceState := NewDFAState(-1, NewATNConfigSet(false))
+ precedenceState.setEdges(make([]*DFAState, 0))
+ precedenceState.isAcceptState = false
+ precedenceState.requiresFullContext = false
+ d.setS0(precedenceState)
+ } else {
+ d.setS0(nil)
+ }
+
+ d.precedenceDfa = precedenceDfa
+ }
+}
+
+// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
+// instantiation of the states JMap.
+func (d *DFA) Len() int {
+ if d.states == nil {
+ return 0
+ }
+ return d.states.Len()
+}
+
+// Get returns a state that matches s if it is present in the DFA state set. We defer to this
+// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap.
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ return nil, false
+ }
+ return d.states.Get(s)
+}
+
+func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
+ }
+ return d.states.Put(s)
+}
+
+func (d *DFA) getS0() *DFAState {
+ return d.s0
+}
+
+func (d *DFA) setS0(s *DFAState) {
+ d.s0 = s
+}
+
+// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
+func (d *DFA) sortedStates() []*DFAState {
+ if d.states == nil {
+ return []*DFAState{}
+ }
+ vs := d.states.SortedSlice(func(i, j *DFAState) bool {
+ return i.stateNumber < j.stateNumber
+ })
+
+ return vs
+}
+
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+func (d *DFA) ToLexerString() string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewLexerDFASerializer(d).String()
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
new file mode 100644
index 0000000000..0e11009899
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
+// strings.
+type DFASerializer struct {
+ dfa *DFA
+ literalNames []string
+ symbolicNames []string
+}
+
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+ if literalNames == nil {
+ literalNames = make([]string, 0)
+ }
+
+ if symbolicNames == nil {
+ symbolicNames = make([]string, 0)
+ }
+
+ return &DFASerializer{
+ dfa: dfa,
+ literalNames: literalNames,
+ symbolicNames: symbolicNames,
+ }
+}
+
+func (d *DFASerializer) String() string {
+ if d.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := d.dfa.sortedStates()
+
+ for _, s := range states {
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += d.GetStateString(s)
+ buf += "-"
+ buf += d.getEdgeLabel(j)
+ buf += "->"
+ buf += d.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
+
+func (d *DFASerializer) getEdgeLabel(i int) string {
+ if i == 0 {
+ return "EOF"
+ } else if d.literalNames != nil && i-1 < len(d.literalNames) {
+ return d.literalNames[i-1]
+ } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+ return d.symbolicNames[i-1]
+ }
+
+ return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+ var a, b string
+
+ if s.isAcceptState {
+ a = ":"
+ }
+
+ if s.requiresFullContext {
+ b = "^"
+ }
+
+ baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+ if s.isAcceptState {
+ if s.predicates != nil {
+ return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+ }
+
+ return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+ }
+
+ return baseStateStr
+}
+
+type LexerDFASerializer struct {
+ *DFASerializer
+}
+
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+ return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(i))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+func (l *LexerDFASerializer) String() string {
+ if l.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := l.dfa.sortedStates()
+
+ for i := 0; i < len(states); i++ {
+ s := states[i]
+
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += l.GetStateString(s)
+ buf += "-"
+ buf += l.getEdgeLabel(j)
+ buf += "->"
+ buf += l.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
new file mode 100644
index 0000000000..6541430745
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+ alt int
+ pred SemanticContext
+}
+
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+ return &PredPrediction{alt: alt, pred: pred}
+}
+
+func (p *PredPrediction) String() string {
+ return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1, a2,..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an."
+//
+// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
+// states the [ATN] could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
+// state (ala normal conversion) and a [RuleContext] describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A [DFAState] may have multiple references to a particular state, but with
+// different [ATN] contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+ stateNumber int
+ configs *ATNConfigSet
+
+ // edges elements point to the target of the symbol. Shift up by 1 so (-1)
+ // Token.EOF maps to the first element.
+ edges []*DFAState
+
+ isAcceptState bool
+
+ // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
+ // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+ // requiresFullContext.
+ prediction int
+
+ lexerActionExecutor *LexerActionExecutor
+
+ // requiresFullContext indicates it was created during an SLL prediction that
+ // discovered a conflict between the configurations in the state. Future
+	// ParserATNSimulator.execATN invocations immediately jump to doing
+	// full-context prediction if true.
+ requiresFullContext bool
+
+ // predicates is the predicates associated with the ATN configurations of the
+ // DFA state during SLL parsing. When we have predicates, requiresFullContext
+ // is false, since full context prediction evaluates predicates on-the-fly. If
+	// predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
+ //
+ // We only use these for non-requiresFullContext but conflicting states. That
+ // means we know from the context (it's $ or we don't dip into outer context)
+ // that it's an ambiguity not a conflict.
+ //
+ // This list is computed by
+ // ParserATNSimulator.predicateDFAState.
+ predicates []*PredPrediction
+}
+
+func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
+ if configs == nil {
+ configs = NewATNConfigSet(false)
+ }
+
+ return &DFAState{configs: configs, stateNumber: stateNumber}
+}
+
+// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
+func (d *DFAState) GetAltSet() []int {
+ var alts []int
+
+ if d.configs != nil {
+ for _, c := range d.configs.configs {
+ alts = append(alts, c.GetAlt())
+ }
+ }
+
+ if len(alts) == 0 {
+ return nil
+ }
+
+ return alts
+}
+
+func (d *DFAState) getEdges() []*DFAState {
+ return d.edges
+}
+
+func (d *DFAState) numEdges() int {
+ return len(d.edges)
+}
+
+func (d *DFAState) getIthEdge(i int) *DFAState {
+ return d.edges[i]
+}
+
+func (d *DFAState) setEdges(newEdges []*DFAState) {
+ d.edges = newEdges
+}
+
+func (d *DFAState) setIthEdge(i int, edge *DFAState) {
+ d.edges[i] = edge
+}
+
+func (d *DFAState) setPrediction(v int) {
+ d.prediction = v
+}
+
+func (d *DFAState) String() string {
+ var s string
+ if d.isAcceptState {
+ if d.predicates != nil {
+ s = "=>" + fmt.Sprint(d.predicates)
+ } else {
+ s = "=>" + fmt.Sprint(d.prediction)
+ }
+ }
+
+ return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+func (d *DFAState) Hash() int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, d.configs.Hash())
+ return murmurFinish(h, 1)
+}
+
+// Equals returns whether d equals other. Two DFAStates are equal if their ATN
+// configuration sets are the same. This method is used to see if a state
+// already exists.
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has this exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) Equals(o Collectable[*DFAState]) bool {
+ if d == o {
+ return true
+ }
+
+ return d.configs.Equals(o.(*DFAState).configs)
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
new file mode 100644
index 0000000000..bd2cd8bc3a
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
@@ -0,0 +1,110 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+//
+// This implementation of {@link ANTLRErrorListener} can be used to identify
+// certain potential correctness and performance problems in grammars. "Reports"
+// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
+// message, for example:
+//
+//   - Ambiguities: These are cases where more than one path through the
+//     grammar can Match the input.
+//
+//   - Weak context sensitivity: These are cases where full-context
+//     prediction resolved an SLL conflict to a unique alternative which equaled the
+//     minimum alternative of the SLL conflict.
+//
+//   - Strong (forced) context sensitivity: These are cases where the
+//     full-context prediction resolved an SLL conflict to a unique alternative,
+//     and the minimum alternative of the SLL conflict was found to not be
+//     a truly viable alternative. Two-stage parsing cannot be used for inputs where
+//     this situation occurs.
+
+type DiagnosticErrorListener struct {
+ *DefaultErrorListener
+
+ exactOnly bool
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
+
+ n := new(DiagnosticErrorListener)
+
+ // whether all ambiguities or only exact ambiguities are Reported.
+ n.exactOnly = exactOnly
+ return n
+}
+
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if d.exactOnly && !exact {
+ return
+ }
+ msg := "reportAmbiguity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ": ambigAlts=" +
+ d.getConflictingAlts(ambigAlts, configs).String() +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
+
+ msg := "reportAttemptingFullContext d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
+ msg := "reportContextSensitivity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
+ decision := dfa.decision
+ ruleIndex := dfa.atnStartState.GetRuleIndex()
+
+ ruleNames := recognizer.GetRuleNames()
+ if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
+ return strconv.Itoa(decision)
+ }
+ ruleName := ruleNames[ruleIndex]
+ if ruleName == "" {
+ return strconv.Itoa(decision)
+ }
+ return strconv.Itoa(decision) + " (" + ruleName + ")"
+}
+
+// Computes the set of conflicting or ambiguous alternatives from a
+// configuration set, if that information was not already provided by the
+// parser.
+//
+// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
+// Reported by the parser.
+// @param configs The conflicting or ambiguous configuration set.
+// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
+// returns the set of alternatives represented in {@code configs}.
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
+ if ReportedAlts != nil {
+ return ReportedAlts
+ }
+ result := NewBitSet()
+ for _, c := range set.configs {
+ result.add(c.GetAlt())
+ }
+
+ return result
+}
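+// A typical usage sketch: attach the listener to a parser before parsing so
+// that ambiguities are reported as they are encountered (parser construction
+// elided):
+//
+//	parser.RemoveErrorListeners()
+//	parser.AddErrorListener(NewDiagnosticErrorListener(true))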
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
new file mode 100644
index 0000000000..21a0216434
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Provides an empty default implementation of {@link ANTLRErrorListener}. The
+// default implementation of each method does nothing, but can be overridden as
+// necessary.
+
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}.
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError prints messages to os.Stderr containing the
+// values of line, charPositionInLine, and msg using
+// the following format:
+//
+//	line <line>:<charPositionInLine> <msg>
+func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
+ _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
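+// A custom listener usually embeds DefaultErrorListener and overrides only
+// the callbacks it cares about; a sketch:
+//
+//	type countingListener struct {
+//		DefaultErrorListener
+//		count int
+//	}
+//
+//	func (l *countingListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+//		l.count++
+//	}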
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
new file mode 100644
index 0000000000..9db2be1c74
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
@@ -0,0 +1,702 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ InErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
+
+// DefaultErrorStrategy is the default implementation of [ErrorStrategy] used for
+// error reporting and recovery in ANTLR parsers.
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // @see //InErrorRecoveryMode
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// reset is the default implementation, which simply calls endErrorCondition to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+ return d.errorRecoveryMode
+}
+
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception.
+func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// ReportError is the default implementation of error reporting.
+// It returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls [beginErrorCondition]
+// and dispatches the Reporting task based on the runtime type of e
+// according to the following table.
+//
+// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
+// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
+// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
+// All other types : Calls [NotifyErrorListeners] to Report the exception
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.InErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// Recover is the default recovery implementation.
+// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
+// loosely the set of tokens that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // uh oh, another error at same token index and previously-Visited
+ // state in ATN must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token
+ // at least to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.GetErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// Sync is the default implementation of error strategy synchronization.
+//
+// This Sync makes sure that the current lookahead symbol is consistent with what we were expecting
+// at this point in the [ATN]. You can call this anytime but ANTLR only
+// generates code to check before sub-rules/loops and each iteration.
+//
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
+// sub-rules. E.g.:
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+// At the start of a sub-rule upon error, Sync performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub-rule is optional ((...)? or (...)*, or a block with an empty
+// alternative), then the expected set includes what follows the sub-rule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub-rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of the entire rules surrounding the loop. So, for rule:
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality cost a bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as empty:
+//
+// { }
+//
+// [Jim Idle]: https://github.com/jimidle
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+ // try cheaper subset first might get lucky. seems to shave a wee bit off
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
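As the note above says, Sync can be switched off by overriding it as empty. A minimal sketch of that advice: embedding DefaultErrorStrategy also carries along the unexported reset method that the ErrorStrategy interface requires, and the parser in the usage comment is any generated parser:

    package example

    import "github.com/antlr4-go/antlr/v4"

    // noSyncStrategy stubs out Sync, trading in-loop recovery quality for speed.
    type noSyncStrategy struct {
        *antlr.DefaultErrorStrategy
    }

    var _ antlr.ErrorStrategy = (*noSyncStrategy)(nil)

    func (s *noSyncStrategy) Sync(_ antlr.Parser) {}

    // usage:
    //   parser.SetErrorHandler(&noSyncStrategy{DefaultErrorStrategy: antlr.NewDefaultErrorStrategy()})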
+
+// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+//
+// See also [ReportError]
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+ input = ""
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+ input = ""
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportUnwantedToken is called to report a syntax error that requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current LT(1) symbol and has not yet been
+// removed from the input stream. When this method returns,
+// recognizer is in error recovery mode.
+//
+// This method is called when singleTokenDeletion identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling
+// [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// ReportMissingToken is called to report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, recognizer is in error recovery mode.
+//
+// This method is called when singleTokenInsertion identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The RecoverInline default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with [InputMisMatchException].
+// TODO: Not sure that panic() is the right thing to do here - JI
+//
+// # EXTRA TOKEN (single token deletion)
+//
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the LA(2) token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenDeletion].
+//
+// # MISSING TOKEN (single token insertion)
+//
+// If the current token, at LA(1), is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's [TokenFactory] to create it on the fly. The
+// “insertion” is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenInsertion].
+//
+// # Example
+//
+// For example, the input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// the call chain:
+//
+// stat → expr → atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+// : ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+//
+// The attempt to [Match] ')' will fail when it sees ';' and
+// call [RecoverInline]. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+ // even that didn't work; record the mismatch error on the recognizer
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ return nil
+}
+
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by [RecoverInline] if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns {@code true}, {@code recognizer} will be in error recovery
+// mode.
+//
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
+// {@code true}, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
+// This func returns true if single-token insertion is a viable recovery
+// strategy for the current mismatched input.
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+ // if current token is consistent with what could come after current
+ // ATN state, then we know we're missing a token error recovery
+ // is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// SingleTokenDeletion implements the single-token deletion inline error recovery
+// strategy. It is called by [RecoverInline] to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// recognizer will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// [ReportUnwantedToken] to Report the error, followed by
+// [Consume] to actually “delete” the extraneous token. Then,
+// before returning, [ReportMatch] is called to signal a successful
+// Match.
+//
+// The func returns the successfully Matched [Token] instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise nil.
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// GetMissingSymbol conjures up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example:
+//
+// x=ID {f($x)}.
+//
+// The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want we assume that
+// this token is missing, and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a [CommonToken] of the appropriate type. The text will be the token name.
+// If you need to change which tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+ if expectedTokenType == TokenEOF {
+ tokenText = "<missing EOF>"
+ } else {
+ ln := recognizer.GetLiteralNames()
+ if expectedTokenType > 0 && expectedTokenType < len(ln) {
+ tokenText = "<missing " + ln[expectedTokenType] + ">"
+ } else {
+ tokenText = "<missing undefined>" // TODO: matches the JS impl
+ }
+ }
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// GetTokenErrorDisplay determines how a token should be displayed in an error message.
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override this func in that case
+// to use t.String() (which, for [CommonToken], dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
+
+// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+//
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # Example
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider the grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+// ;
+//
+// b : c '^' INT
+// ;
+//
+// c : ID
+// | INT
+// ;
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input “[]”, the call chain is
+//
+// a → b → c
+//
+// and, hence, the follow context stack is:
+//
+// Depth Follow set Start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets - the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c:
+//
+// {']','^'}
+//
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
+// [A note on error recovery in recursive descent parsers].
+//
+// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers]
+//
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
+// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs
+//
+// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
+// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
+// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
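To make the "[]" walkthrough concrete, here is an in-package sketch of the combined recovery set; addOne and contains are unexported, so this only compiles inside package antlr, and RBRACK and CARET are hypothetical token types standing in for ']' and '^':

    // Inside package antlr.
    const (
        RBRACK = 1 // hypothetical token type for ']'
        CARET  = 2 // hypothetical token type for '^'
    )

    func exampleRecoverySet() *IntervalSet {
        recoverSet := NewIntervalSet()
        recoverSet.addOne(RBRACK) // FOLLOW of b's invocation in rule a
        recoverSet.addOne(CARET)  // FOLLOW of c's invocation in rule b
        // recoverSet.contains(RBRACK) is true, so on input "[]" nothing is
        // consumed: rule c reports its error and returns normally.
        return recoverSet
    }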
+
+// consumeUntil consumes tokens until one Matches the given token set.
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+// BailErrorStrategy is an [ErrorStrategy] implementation that responds to syntax errors
+// by immediately canceling the parse operation with a
+// [ParseCancellationException]. The implementation ensures that the
+// [ParserRuleContext] exception field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+// This error strategy is useful in the following scenarios.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of [BailErrorStrategy.Sync] improves the performance of
+// the first stage.
+//
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the [BailErrorStrategy] avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+// myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// See also: [Parser.SetErrorHandler]
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
+
+// Recover, instead of recovering from exception e, re-panics it wrapped
+// in a [ParseCancellationException] so it is not caught by the rule
+// function catch blocks. Use Exception.GetCause() to get the
+// original [RecognitionException].
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ if parent, ok := context.GetParent().(ParserRuleContext); ok {
+ context = parent
+ } else {
+ context = nil
+ }
+ }
+ recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
+}
+
+// RecoverInline makes sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic an exception.
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Sync makes sure we don't attempt to recover from problems in sub-rules.
+func (b *BailErrorStrategy) Sync(_ Parser) {
+}
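A hedged sketch of the silent-validation scenario described above; NewExprLexer, NewExprParser, and the start rule Prog are hypothetical generated names, and HasError is assumed to surface the error recorded through SetError:

    package example

    import "github.com/antlr4-go/antlr/v4"

    // silentValid reports whether src parses, without printing diagnostics
    // and without spending effort on recovery.
    func silentValid(src string) bool {
        input := antlr.NewInputStream(src)
        lexer := NewExprLexer(input) // hypothetical generated lexer
        tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
        parser := NewExprParser(tokens) // hypothetical generated parser
        parser.RemoveErrorListeners()                        // stay silent
        parser.SetErrorHandler(antlr.NewBailErrorStrategy()) // bail on first error
        parser.Prog()
        return !parser.HasError() // assumed accessor for the recorded error
    }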
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/errors.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/errors.go
new file mode 100644
index 0000000000..8f0f2f601f
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/errors.go
@@ -0,0 +1,259 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+
+ // The current Token when an error occurred. Since not all streams
+ // support accessing symbols by index, we have to track the [Token]
+ // instance itself.
+ //
+ t.offendingToken = nil
+
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
+ // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
+ //
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the offending state number is not known, it is recorded as -1.
+
+// getExpectedTokens gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns nil.
+//
+// The func returns the set of token types that could potentially follow the current
+// state in the [ATN], or nil if the information is not available.
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs *ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs *ATNConfigSet
+}
+
+// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred.
+//
+// Reported by [ReportNoViableAlternative]
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)
+ n.deadEndConfigs = deadEndConfigs
+
+ // The token object at the start index the input stream might
+ // not be buffering tokens so get a reference to it.
+ //
+ // At the time the error occurred, of course the stream needs to keep a
+ // buffer of all the tokens, but later we might not have access to those.
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
+// when the current input does not Match the expected token.
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+
+}
+
+// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
+// occurs when the alternative is parsed normally, just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func (p ParseCancellationException) GetOffendingToken() Token {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetMessage() string {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetInputStream() IntStream {
+ //TODO implement me
+ panic("implement me")
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
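Because RecognitionException is a plain three-method interface, code outside the runtime can define additional error kinds; a minimal illustrative sketch:

    package example

    import "github.com/antlr4-go/antlr/v4"

    // semanticError is an illustrative error type satisfying RecognitionException.
    type semanticError struct {
        msg   string
        tok   antlr.Token
        input antlr.IntStream
    }

    func (e *semanticError) GetMessage() string              { return e.msg }
    func (e *semanticError) GetOffendingToken() antlr.Token  { return e.tok }
    func (e *semanticError) GetInputStream() antlr.IntStream { return e.input }

    var _ antlr.RecognitionException = (*semanticError)(nil)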
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
new file mode 100644
index 0000000000..5f65f809be
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ InputStream
+ filename string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+
+ // The file is read in full below, so an error from Close is not actionable
+ // and is deliberately ignored.
+ defer func(f *os.File) {
+ _ = f.Close()
+ }(f)
+
+ reader := bufio.NewReader(f)
+ fInfo, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ fs := &FileStream{
+ InputStream: InputStream{
+ index: 0,
+ name: fileName,
+ },
+ filename: fileName,
+ }
+
+ // Pre-build the buffer and read runes efficiently
+ //
+ fs.data = make([]rune, 0, fInfo.Size())
+ for {
+ r, _, err := reader.ReadRune()
+ if err != nil {
+ break
+ }
+ fs.data = append(fs.data, r)
+ }
+ fs.size = len(fs.data) // Size in runes
+
+ // All done.
+ //
+ return fs, nil
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
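Typical use is to hand the FileStream straight to a generated lexer; NewMyLexer is a hypothetical generated constructor, the rest is the runtime API shown here:

    package main

    import (
        "log"

        "github.com/antlr4-go/antlr/v4"
    )

    func main() {
        fs, err := antlr.NewFileStream("input.txt") // whole file read into runes up front
        if err != nil {
            log.Fatal(err)
        }
        lexer := NewMyLexer(fs) // hypothetical generated lexer over the stream
        tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
        _ = tokens // feed to a generated parser from here
    }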
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
new file mode 100644
index 0000000000..b737fe85fb
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "io"
+)
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+// NewIoStream creates a new input stream from the given io.Reader reader.
+// Note that the reader is read completely into memory and so it must actually
+// have a stopping point - you cannot pass in a reader on an open-ended source such
+// as a socket for instance.
+func NewIoStream(reader io.Reader) *InputStream {
+
+ rReader := bufio.NewReader(reader)
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ }
+
+ // Pre-build the buffer and read runes reasonably efficiently given that
+ // we don't exactly know how big the input is.
+ //
+ is.data = make([]rune, 0, 512)
+ for {
+ r, _, err := rReader.ReadRune()
+ if err != nil {
+ break
+ }
+ is.data = append(is.data, r)
+ }
+ is.size = len(is.data) // number of runes
+ return is
+}
+
+// NewInputStream creates a new input stream from the given string
+func NewInputStream(data string) *InputStream {
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ data: []rune(data), // This is actually the most efficient way
+ }
+ is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+// Consume moves the input pointer to the next character in the input stream
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+// LA returns the character at the given offset from the start of the input stream
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+// LT returns the character at the given offset from the start of the input stream
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+// Index returns the current offset into the input stream
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+// Size returns the total number of characters in the input stream
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// Mark does nothing here as we have entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+// Release does nothing here as we have entire buffer
+func (is *InputStream) Release(_ int) {
+}
+
+// Seek moves the input pointer to the provided index offset
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump; don't update stream state (line, ...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+// GetText returns the text from the input stream from the start to the stop index
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
+// character of the stop token
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return ""
+}
+
+// String returns the entire input stream as a string
+func (is *InputStream) String() string {
+ return string(is.data)
+}
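The LA conventions (offset 0 returns 0, negative offsets look backwards, out-of-range positions yield TokenEOF, and GetText treats the stop index as inclusive) are easiest to see in a tiny runnable example:

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    func main() {
        is := antlr.NewInputStream("ab")

        fmt.Println(is.LA(1) == 'a') // true: lookahead does not move the index
        is.Consume()
        fmt.Println(is.LA(1) == 'b')  // true
        fmt.Println(is.LA(-1) == 'a') // true: negative offsets look backwards
        is.Consume()
        fmt.Println(is.LA(1) == antlr.TokenEOF) // true: past the end
        fmt.Println(is.GetText(0, 1))           // "ab": stop index is inclusive
    }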
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/int_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
new file mode 100644
index 0000000000..4778878bd0
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type IntStream interface {
+ Consume()
+ LA(int) int
+ Mark() int
+ Release(marker int)
+ Index() int
+ Seek(index int)
+ Size() int
+ GetSourceName() string
+}
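A minimal sketch of a custom IntStream over a fixed slice of symbol values, mirroring InputStream's LA conventions; the type is purely illustrative, e.g. for tests:

    package example

    import "github.com/antlr4-go/antlr/v4"

    // sliceIntStream serves int symbols from a fixed in-memory slice.
    type sliceIntStream struct {
        data  []int
        index int
    }

    func (s *sliceIntStream) Consume() { s.index++ }

    func (s *sliceIntStream) LA(offset int) int {
        if offset == 0 {
            return 0
        }
        if offset < 0 {
            offset++ // same convention as InputStream: LA(-1) is the last consumed symbol
        }
        pos := s.index + offset - 1
        if pos < 0 || pos >= len(s.data) {
            return antlr.TokenEOF
        }
        return s.data[pos]
    }

    func (s *sliceIntStream) Mark() int             { return -1 } // entire buffer is in memory
    func (s *sliceIntStream) Release(_ int)         {}
    func (s *sliceIntStream) Index() int            { return s.index }
    func (s *sliceIntStream) Seek(index int)        { s.index = index }
    func (s *sliceIntStream) Size() int             { return len(s.data) }
    func (s *sliceIntStream) GetSourceName() string { return "slice" }

    var _ antlr.IntStream = (*sliceIntStream)(nil)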
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/interval_set.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
new file mode 100644
index 0000000000..cc5066067a
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
@@ -0,0 +1,330 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type Interval struct {
+ Start int
+ Stop int
+}
+
+// NewInterval creates a new interval with the given start and stop values.
+func NewInterval(start, stop int) Interval {
+ return Interval{
+ Start: start,
+ Stop: stop,
+ }
+}
+
+// Contains returns true if the given item is contained within the interval.
+func (i Interval) Contains(item int) bool {
+ return item >= i.Start && item < i.Stop
+}
+
+// String generates a string representation of the interval.
+func (i Interval) String() string {
+ if i.Start == i.Stop-1 {
+ return strconv.Itoa(i.Start)
+ }
+
+ return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
+}
+
+// Length returns the length of the interval.
+func (i Interval) Length() int {
+ return i.Stop - i.Start
+}
+
+// IntervalSet represents a collection of [Intervals], which may be read-only.
+type IntervalSet struct {
+ intervals []Interval
+ readOnly bool
+}
+
+// NewIntervalSet creates a new empty, writable, interval set.
+func NewIntervalSet() *IntervalSet {
+
+ i := new(IntervalSet)
+
+ i.intervals = nil
+ i.readOnly = false
+
+ return i
+}
+
+func (i *IntervalSet) Equals(other *IntervalSet) bool {
+ if len(i.intervals) != len(other.intervals) {
+ return false
+ }
+
+ for k, v := range i.intervals {
+ if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (i *IntervalSet) first() int {
+ if len(i.intervals) == 0 {
+ return TokenInvalidType
+ }
+
+ return i.intervals[0].Start
+}
+
+func (i *IntervalSet) addOne(v int) {
+ i.addInterval(NewInterval(v, v+1))
+}
+
+func (i *IntervalSet) addRange(l, h int) {
+ i.addInterval(NewInterval(l, h+1))
+}
+
+func (i *IntervalSet) addInterval(v Interval) {
+ if i.intervals == nil {
+ i.intervals = make([]Interval, 0)
+ i.intervals = append(i.intervals, v)
+ } else {
+ // find insert pos
+ for k, interval := range i.intervals {
+ // distinct range -> insert
+ if v.Stop < interval.Start {
+ i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
+ return
+ } else if v.Stop == interval.Start {
+ i.intervals[k].Start = v.Start
+ return
+ } else if v.Start <= interval.Stop {
+ i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
+
+ // if not applying to end, merge potential overlaps
+ if k < len(i.intervals)-1 {
+ l := i.intervals[k]
+ r := i.intervals[k+1]
+ // if r contained in l
+ if l.Stop >= r.Stop {
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ } else if l.Stop >= r.Start { // partial overlap
+ i.intervals[k] = NewInterval(l.Start, r.Stop)
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ }
+ }
+ return
+ }
+ }
+ // greater than any existing interval
+ i.intervals = append(i.intervals, v)
+ }
+}
+
+func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
+ if other.intervals != nil {
+ for k := 0; k < len(other.intervals); k++ {
+ i2 := other.intervals[k]
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
+ }
+ }
+ return i
+}
+
+func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
+ result := NewIntervalSet()
+ result.addInterval(NewInterval(start, stop+1))
+ for j := 0; j < len(i.intervals); j++ {
+ result.removeRange(i.intervals[j])
+ }
+ return result
+}
+
+func (i *IntervalSet) contains(item int) bool {
+ if i.intervals == nil {
+ return false
+ }
+ for k := 0; k < len(i.intervals); k++ {
+ if i.intervals[k].Contains(item) {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *IntervalSet) length() int {
+ iLen := 0
+
+ for _, v := range i.intervals {
+ iLen += v.Length()
+ }
+
+ return iLen
+}
+
+func (i *IntervalSet) removeRange(v Interval) {
+ if v.Start == v.Stop-1 {
+ i.removeOne(v.Start)
+ } else if i.intervals != nil {
+ k := 0
+ for n := 0; n < len(i.intervals); n++ {
+ ni := i.intervals[k]
+ // intervals are ordered
+ if v.Stop <= ni.Start {
+ return
+ } else if v.Start > ni.Start && v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ x := NewInterval(v.Stop, ni.Stop)
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ return
+ } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ k = k - 1 // need another pass
+ } else if v.Start < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ } else if v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+ }
+ k++
+ }
+ }
+}
+
+func (i *IntervalSet) removeOne(v int) {
+ if i.intervals != nil {
+ for k := 0; k < len(i.intervals); k++ {
+ ki := i.intervals[k]
+ // intervals are ordered
+ if v < ki.Start {
+ return
+ } else if v == ki.Start && v == ki.Stop-1 {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ return
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+ return
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+ return
+ } else if v < ki.Stop-1 {
+ x := NewInterval(ki.Start, v)
+ ki.Start = v + 1
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ return
+ }
+ }
+ }
+}
+
+func (i *IntervalSet) String() string {
+ return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+ if i.intervals == nil {
+ return "{}"
+ } else if literalNames != nil || symbolicNames != nil {
+ return i.toTokenString(literalNames, symbolicNames)
+ } else if elemsAreChar {
+ return i.toCharString()
+ }
+
+ return i.toIndexString()
+}
+
+func (i *IntervalSet) GetIntervals() []Interval {
+ return i.intervals
+}
+
+func (i *IntervalSet) toCharString() string {
+ names := make([]string, 0, len(i.intervals)) // zero length; entries are appended below
+
+ var sb strings.Builder
+
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(v.Stop - 1))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+ names := make([]string, 0)
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ names = append(names, strconv.Itoa(v.Start))
+ }
+ } else {
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+ names := make([]string, 0)
+ for _, v := range i.intervals {
+ for j := v.Start; j < v.Stop; j++ {
+ names = append(names, i.elementName(literalNames, symbolicNames, j))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+ if a == TokenEOF {
+ return ""
+ } else if a == TokenEpsilon {
+ return ""
+ } else {
+ if a < len(literalNames) && literalNames[a] != "" {
+ return literalNames[a]
+ }
+
+ return symbolicNames[a]
+ }
+}
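One subtlety worth pinning down with a runnable example: within Interval's own methods Stop is exclusive (Contains tests item < Stop), while String prints the range inclusively, so the two can look off-by-one; note that GetTextFromInterval in input_stream.go treats Stop inclusively instead:

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    func main() {
        iv := antlr.NewInterval(5, 8) // covers 5, 6, 7: Stop is exclusive here

        fmt.Println(iv.Contains(7)) // true
        fmt.Println(iv.Contains(8)) // false: 8 == Stop falls outside
        fmt.Println(iv.String())    // "5..7": printed inclusively
        fmt.Println(iv.Length())    // 3
    }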
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
new file mode 100644
index 0000000000..ceccd96d25
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
@@ -0,0 +1,685 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "container/list"
+ "runtime/debug"
+ "sort"
+ "sync"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+type CollectionSource int
+type CollectionDescriptor struct {
+ SybolicName string
+ Description string
+}
+
+const (
+ UnknownCollection CollectionSource = iota
+ ATNConfigLookupCollection
+ ATNStateCollection
+ DFAStateCollection
+ ATNConfigCollection
+ PredictionContextCollection
+ SemanticContextCollection
+ ClosureBusyCollection
+ PredictionVisitedCollection
+ MergeCacheCollection
+ PredictionContextCacheCollection
+ AltSetCollection
+ ReachSetCollection
+)
+
+var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
+ UnknownCollection: {
+ SybolicName: "UnknownCollection",
+ Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
+ },
+ ATNConfigCollection: {
+ SybolicName: "ATNConfigCollection",
+ Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "For instance, it is used to store the results of the closure() operation in the ATN.",
+ },
+ ATNConfigLookupCollection: {
+ SybolicName: "ATNConfigLookupCollection",
+ Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
+ },
+ ATNStateCollection: {
+ SybolicName: "ATNStateCollection",
+ Description: "ATNState collection. This is used to store the states of the ATN.",
+ },
+ DFAStateCollection: {
+ SybolicName: "DFAStateCollection",
+ Description: "DFAState collection. This is used to store the states of the DFA.",
+ },
+ PredictionContextCollection: {
+ SybolicName: "PredictionContextCollection",
+ Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
+ },
+ SemanticContextCollection: {
+ SybolicName: "SemanticContextCollection",
+ Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
+ },
+ ClosureBusyCollection: {
+ SybolicName: "ClosureBusyCollection",
+ Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
+ "It stores ATNConfigs that are currently being processed in the closure() operation.",
+ },
+ PredictionVisitedCollection: {
+ SybolicName: "PredictionVisitedCollection",
+ Description: "A map that records whether we have visited a particular context when searching through cached entries.",
+ },
+ MergeCacheCollection: {
+ SybolicName: "MergeCacheCollection",
+ Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
+ },
+ PredictionContextCacheCollection: {
+ SybolicName: "PredictionContextCacheCollection",
+ Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
+ },
+ AltSetCollection: {
+ SybolicName: "AltSetCollection",
+ Description: "Used to eliminate duplicate alternatives in an ATN config set.",
+ },
+ ReachSetCollection: {
+ SybolicName: "ReachSetCollection",
+ Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
+ },
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+ stats *JStatRec
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ s.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ s.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(s.stats)
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// If the given value is already present in the store, then the existing value is returned as v and exists is set to true.
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
+
+ if collectStats {
+ s.stats.Puts++
+ }
+ kh := s.comparator.Hash1(value)
+
+ var hClash bool
+ for _, v1 := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(value, v1) {
+ if collectStats {
+ s.stats.PutHits++
+ s.stats.PutHashConflicts++
+ }
+ return v1, true
+ }
+ if collectStats {
+ s.stats.PutMisses++
+ }
+ }
+ if collectStats && hClash {
+ s.stats.PutHashConflicts++
+ }
+ s.store[kh] = append(s.store[kh], value)
+
+ if collectStats {
+ if len(s.store[kh]) > s.stats.MaxSlotSize {
+ s.stats.MaxSlotSize = len(s.store[kh])
+ }
+ }
+ s.len++
+ if collectStats {
+ s.stats.CurSize = s.len
+ if s.len > s.stats.MaxSize {
+ s.stats.MaxSize = s.len
+ }
+ }
+ return value, false
+}
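+
+// A minimal usage sketch of Put (illustrative only; the element type,
+// comparator instance, and descriptive name below are assumptions that reuse
+// identifiers defined in this file):
+//
+//	store := NewJStore[*PredictionContext, *ObjEqComparator[*PredictionContext]](
+//		pContextEqInst, PredictionContextCacheCollection, "dedup sketch")
+//	canonical, present := store.Put(ctx)
+//	if present {
+//		ctx = canonical // an Equals-equal value was already stored; reuse it
+//	}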
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) {
+ if collectStats {
+ s.stats.Gets++
+ }
+ kh := s.comparator.Hash1(key)
+ var hClash bool
+ for _, v := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(key, v) {
+ if collectStats {
+ s.stats.GetHits++
+ s.stats.GetHashConflicts++
+ }
+ return v, true
+ }
+ if collectStats {
+ s.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ s.stats.GetHashConflicts++
+ }
+ s.stats.GetNoEnt++
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool {
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ vs = append(vs, e...)
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+ stats *JStatRec
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
+ m := &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
+ if collectStats {
+ m.stats.Puts++
+ }
+ kh := m.comparator.Hash1(key)
+
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.PutHits++
+ m.stats.PutHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.PutHashConflicts++
+ }
+ }
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ if collectStats {
+ if len(m.store[kh]) > m.stats.MaxSlotSize {
+ m.stats.MaxSlotSize = len(m.store[kh])
+ }
+ }
+ m.len++
+ if collectStats {
+ m.stats.CurSize = m.len
+ if m.len > m.stats.MaxSize {
+ m.stats.MaxSize = m.len
+ }
+ }
+ return val, false
+}
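+
+// A usage sketch (illustrative; k and v are assumed *PredictionContext values):
+//
+//	m := NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](
+//		pContextEqInst, PredictionContextCacheCollection, "pair cache sketch")
+//	prev, existed := m.Put(k, v) // returns the stored value unchanged if an Equals-equal key exists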
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+ if collectStats {
+ m.stats.Gets++
+ }
+ var none V
+ kh := m.comparator.Hash1(key)
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.GetHits++
+ m.stats.GetHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.GetHashConflicts++
+ }
+ m.stats.GetNoEnt++
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return m.len
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+	m.store = make(map[int][]*entry[K, V])
+	m.len = 0 // reset the tracked length so Len() reflects the cleared map
+}
+
+type JPCMap struct {
+ store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
+ size int
+ stats *JStatRec
+}
+
+func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
+ m := &JPCMap{
+ store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+ // Do we have a map stored by k1?
+ //
+ m2, present := pcm.store.Get(k1)
+ if present {
+ if collectStats {
+ pcm.stats.GetHits++
+ }
+ // We found a map of values corresponding to k1, so now we need to look up k2 in that map
+ //
+ return m2.Get(k2)
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {
+
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ // First does a map already exist for k1?
+ //
+ if m2, present := pcm.store.Get(k1); present {
+ if collectStats {
+ pcm.stats.PutHits++
+ }
+ _, present = m2.Put(k2, v)
+ if !present {
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ }
+ } else {
+		// No map found for k1, so we create it, add in our value, then store it
+ //
+ if collectStats {
+ pcm.stats.PutMisses++
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
+ } else {
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
+ }
+
+ m2.Put(k2, v)
+ pcm.store.Put(k1, m2)
+ pcm.size++
+ }
+}
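+
+// The intended access pattern for this double-keyed merge cache is
+// check-then-store (a sketch; a, b, and merged are illustrative names):
+//
+//	if v, ok := pcm.Get(a, b); ok {
+//		return v // the merge of (a, b) was already computed
+//	}
+//	pcm.Put(a, b, merged)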
+
+type JPCMap2 struct {
+ store map[int][]JPCEntry
+ size int
+ stats *JStatRec
+}
+
+type JPCEntry struct {
+ k1, k2, v *PredictionContext
+}
+
+func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
+ m := &JPCMap2{
+ store: make(map[int][]JPCEntry, 1000),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
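+// dHash combines the cached hashes of the two prediction contexts using the
+// conventional Java-style pairing h1*31 + h2, so that the key pair (k1, k2)
+// maps to a single bucket index in the store.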
+func dHash(k1, k2 *PredictionContext) int {
+ return k1.cachedHash*31 + k2.cachedHash
+}
+
+func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.GetHits++
+ pcm.stats.GetHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.GetHashConflicts++
+ }
+ pcm.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.PutHits++
+ pcm.stats.PutHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.PutHashConflicts++
+ }
+ }
+ pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ return nil, false
+}
+
+type VisitEntry struct {
+ k *PredictionContext
+ v *PredictionContext
+}
+type VisitRecord struct {
+ store map[*PredictionContext]*PredictionContext
+ len int
+ stats *JStatRec
+}
+
+type VisitList struct {
+ cache *list.List
+ lock sync.RWMutex
+}
+
+var visitListPool = VisitList{
+ cache: list.New(),
+ lock: sync.RWMutex{},
+}
+
+// NewVisitRecord returns a new VisitRecord instance from the pool if available.
+// Note that this "map" uses a pointer as a key because we are emulating the behavior of
+// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal,
+// which means: is the key the same object reference, rather than whether it is
+// .equals() to another object.
+func NewVisitRecord() *VisitRecord {
+ visitListPool.lock.Lock()
+ el := visitListPool.cache.Front()
+ defer visitListPool.lock.Unlock()
+ var vr *VisitRecord
+ if el == nil {
+ vr = &VisitRecord{
+ store: make(map[*PredictionContext]*PredictionContext),
+ }
+ if collectStats {
+ vr.stats = &JStatRec{
+ Source: PredictionContextCacheCollection,
+ Description: "VisitRecord",
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ vr.stats.CreateStack = debug.Stack()
+ }
+ }
+ } else {
+ vr = el.Value.(*VisitRecord)
+ visitListPool.cache.Remove(el)
+ vr.store = make(map[*PredictionContext]*PredictionContext)
+ }
+ if collectStats {
+ Statistics.AddJStatRec(vr.stats)
+ }
+ return vr
+}
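+
+// Callers are expected to pair each NewVisitRecord with a Release, which
+// returns the record to visitListPool for reuse (sketch):
+//
+//	vr := NewVisitRecord()
+//	defer vr.Release()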
+
+func (vr *VisitRecord) Release() {
+ vr.len = 0
+ vr.store = nil
+ if collectStats {
+ vr.stats.MaxSize = 0
+ vr.stats.CurSize = 0
+ vr.stats.Gets = 0
+ vr.stats.GetHits = 0
+ vr.stats.GetMisses = 0
+ vr.stats.GetHashConflicts = 0
+ vr.stats.GetNoEnt = 0
+ vr.stats.Puts = 0
+ vr.stats.PutHits = 0
+ vr.stats.PutMisses = 0
+ vr.stats.PutHashConflicts = 0
+ vr.stats.MaxSlotSize = 0
+ }
+ visitListPool.lock.Lock()
+ visitListPool.cache.PushBack(vr)
+ visitListPool.lock.Unlock()
+}
+
+func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Gets++
+ }
+ v := vr.store[k]
+ if v != nil {
+ if collectStats {
+ vr.stats.GetHits++
+ }
+ return v, true
+ }
+ if collectStats {
+ vr.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Puts++
+ }
+ vr.store[k] = v
+ vr.len++
+ if collectStats {
+ vr.stats.CurSize = vr.len
+ if vr.len > vr.stats.MaxSize {
+ vr.stats.MaxSize = vr.len
+ }
+ }
+ return v, false
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go
new file mode 100644
index 0000000000..3c7896a918
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go
@@ -0,0 +1,426 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A lexer is a recognizer that draws input symbols from a character stream.
+// Lexer grammars result in a subclass of this object. A Lexer object
+// uses simplified Match() and error recovery mechanisms in the interest
+// of speed.
+
+type Lexer interface {
+ TokenSource
+ Recognizer
+
+ Emit() Token
+
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
+}
+
+type BaseLexer struct {
+ *BaseRecognizer
+
+ Interpreter ILexerATNSimulator
+ TokenStartCharIndex int
+ TokenStartLine int
+ TokenStartColumn int
+ ActionType int
+ Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
+
+ input CharStream
+ factory TokenFactory
+ tokenFactorySourcePair *TokenSourceCharStreamPair
+ token Token
+ hitEOF bool
+ channel int
+ thetype int
+ modeStack IntStack
+ mode int
+ text string
+}
+
+func NewBaseLexer(input CharStream) *BaseLexer {
+
+ lexer := new(BaseLexer)
+
+ lexer.BaseRecognizer = NewBaseRecognizer()
+
+ lexer.input = input
+ lexer.factory = CommonTokenFactoryDEFAULT
+ lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
+
+ lexer.Virt = lexer
+
+ lexer.Interpreter = nil // child classes must populate it
+
+	// The goal of all lexer rules/methods is to create a token object.
+	// This is an instance variable as multiple rules may collaborate to
+	// create a single token. NextToken will return this object after
+	// Matching lexer rule(s). If you subclass to allow multiple token
+	// emissions, then set this to the last token to be Matched or
+	// something non-nil so that the auto token emit mechanism will not
+	// emit another token.
+ lexer.token = nil
+
+ // What character index in the stream did the current token start at?
+ // Needed, for example, to get the text for current token. Set at
+ // the start of NextToken.
+ lexer.TokenStartCharIndex = -1
+
+	// The line on which the first character of the token resides
+	lexer.TokenStartLine = -1
+
+	// The character position of the first character within the line
+	lexer.TokenStartColumn = -1
+
+ // Once we see EOF on char stream, next token will be EOF.
+ // If you have DONE : EOF then you see DONE EOF.
+ lexer.hitEOF = false
+
+	// The channel number for the current token
+	lexer.channel = TokenDefaultChannel
+
+	// The token type for the current token
+ lexer.thetype = TokenInvalidType
+
+ lexer.modeStack = make([]int, 0)
+ lexer.mode = LexerDefaultMode
+
+	// You can set the text for the current token to override what is in
+	// the input char buffer. Use SetText() or set this instance variable
+	// directly.
+ lexer.text = ""
+
+ return lexer
+}
+
+const (
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
+)
+
+//goland:noinspection GoUnusedConst
+const (
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = 0x0000
+ LexerMaxCharValue = 0x10FFFF
+)
+
+func (b *BaseLexer) Reset() {
+	// reset all Lexer state variables
+ if b.input != nil {
+ b.input.Seek(0) // rewind the input
+ }
+ b.token = nil
+ b.thetype = TokenInvalidType
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = -1
+ b.TokenStartColumn = -1
+ b.TokenStartLine = -1
+ b.text = ""
+
+ b.hitEOF = false
+ b.mode = LexerDefaultMode
+ b.modeStack = make([]int, 0)
+
+ b.Interpreter.reset()
+}
+
+func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
+ return b.Interpreter
+}
+
+func (b *BaseLexer) GetInputStream() CharStream {
+ return b.input
+}
+
+func (b *BaseLexer) GetSourceName() string {
+ return b.GrammarFileName
+}
+
+func (b *BaseLexer) SetChannel(v int) {
+ b.channel = v
+}
+
+func (b *BaseLexer) GetTokenFactory() TokenFactory {
+ return b.factory
+}
+
+func (b *BaseLexer) setTokenFactory(f TokenFactory) {
+ b.factory = f
+}
+
+func (b *BaseLexer) safeMatch() (ret int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if re, ok := e.(RecognitionException); ok {
+ b.notifyListeners(re) // Report error
+ b.Recover(re)
+ ret = LexerSkip // default
+ }
+ }
+ }()
+
+ return b.Interpreter.Match(b.input, b.mode)
+}
+
+// NextToken returns a token from the lexer input source, i.e. it Matches a token on the source char stream.
+func (b *BaseLexer) NextToken() Token {
+ if b.input == nil {
+ panic("NextToken requires a non-nil input stream.")
+ }
+
+ tokenStartMarker := b.input.Mark()
+
+ // previously in finally block
+ defer func() {
+ // make sure we release marker after Match or
+ // unbuffered char stream will keep buffering
+ b.input.Release(tokenStartMarker)
+ }()
+
+ for {
+ if b.hitEOF {
+ b.EmitEOF()
+ return b.token
+ }
+ b.token = nil
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = b.input.Index()
+ b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
+ b.TokenStartLine = b.Interpreter.GetLine()
+ b.text = ""
+ continueOuter := false
+ for {
+ b.thetype = TokenInvalidType
+
+ ttype := b.safeMatch()
+
+ if b.input.LA(1) == TokenEOF {
+ b.hitEOF = true
+ }
+ if b.thetype == TokenInvalidType {
+ b.thetype = ttype
+ }
+ if b.thetype == LexerSkip {
+ continueOuter = true
+ break
+ }
+ if b.thetype != LexerMore {
+ break
+ }
+ }
+
+ if continueOuter {
+ continue
+ }
+ if b.token == nil {
+ b.Virt.Emit()
+ }
+ return b.token
+ }
+}
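+
+// A typical driver loop over the token source looks like the sketch below;
+// NewMyLexer is a hypothetical generated-lexer constructor, not part of this
+// runtime:
+//
+//	lex := NewMyLexer(input)
+//	for tok := lex.NextToken(); tok.GetTokenType() != TokenEOF; tok = lex.NextToken() {
+//		fmt.Println(tok)
+//	}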
+
+// Skip instructs the lexer to Skip creating a token for current lexer rule
+// and look for another token. [NextToken] knows to keep looking when
+// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
+// if token==nil at end of any token rule, it creates one for you
+// and emits it.
+func (b *BaseLexer) Skip() {
+ b.thetype = LexerSkip
+}
+
+func (b *BaseLexer) More() {
+ b.thetype = LexerMore
+}
+
+// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
+// will be in force.
+func (b *BaseLexer) SetMode(m int) {
+ b.mode = m
+}
+
+// PushMode saves the current lexer mode so that it can be restored later (see
+// [PopMode]), then sets the current lexer mode to the supplied mode m.
+func (b *BaseLexer) PushMode(m int) {
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("pushMode " + strconv.Itoa(m))
+ }
+ b.modeStack.Push(b.mode)
+ b.mode = m
+}
+
+// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
+// return to.
+func (b *BaseLexer) PopMode() int {
+ if len(b.modeStack) == 0 {
+ panic("Empty Stack")
+ }
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
+ }
+ i, _ := b.modeStack.Pop()
+ b.mode = i
+ return b.mode
+}
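+
+// PushMode and PopMode are normally paired to enter and leave island grammars
+// (sketch; the mode constant is hypothetical):
+//
+//	b.PushMode(myLexerStringMode) // enter the island mode
+//	// ... match island tokens ...
+//	b.PopMode() // restore the enclosing mode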
+
+func (b *BaseLexer) inputStream() CharStream {
+ return b.input
+}
+
+// SetInputStream resets the lexer input stream and associated lexer state.
+func (b *BaseLexer) SetInputStream(input CharStream) {
+ b.input = nil
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+ b.Reset()
+ b.input = input
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+}
+
+func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
+ return b.tokenFactorySourcePair
+}
+
+// EmitToken by default does not support multiple emits per [NextToken] invocation
+// for efficiency reasons. Subclass and override this func, [NextToken],
+// and [GetToken] (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+func (b *BaseLexer) EmitToken(token Token) {
+ b.token = token
+}
+
+// Emit is the standard method called to automatically emit a token at the
+// outermost lexical rule. The token object should point into the
+// char buffer start..stop. If there is a text override in 'text',
+// use that to set the token's text. Override this method to emit
+// custom [Token] objects or provide a new factory.
+func (b *BaseLexer) Emit() Token {
+ t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
+ b.EmitToken(t)
+ return t
+}
+
+// EmitEOF emits an EOF token. By default, this is the last token emitted.
+func (b *BaseLexer) EmitEOF() Token {
+ cpos := b.GetCharPositionInLine()
+ lpos := b.GetLine()
+ eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
+ b.EmitToken(eof)
+ return eof
+}
+
+// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
+func (b *BaseLexer) GetCharPositionInLine() int {
+ return b.Interpreter.GetCharPositionInLine()
+}
+
+func (b *BaseLexer) GetLine() int {
+ return b.Interpreter.GetLine()
+}
+
+func (b *BaseLexer) GetType() int {
+ return b.thetype
+}
+
+func (b *BaseLexer) SetType(t int) {
+ b.thetype = t
+}
+
+// GetCharIndex returns the index of the current character of lookahead
+func (b *BaseLexer) GetCharIndex() int {
+ return b.input.Index()
+}
+
+// GetText returns the text Matched so far for the current token or any text override.
+func (b *BaseLexer) GetText() string {
+ if b.text != "" {
+ return b.text
+ }
+
+ return b.Interpreter.GetText(b.input)
+}
+
+// SetText sets the complete text of this token; it wipes any previous changes to the text.
+func (b *BaseLexer) SetText(text string) {
+ b.text = text
+}
+
+// GetATN returns the ATN used by the lexer.
+func (b *BaseLexer) GetATN() *ATN {
+ return b.Interpreter.ATN()
+}
+
+// GetAllTokens returns a list of all [Token] objects in input char stream.
+// Forces a load of all tokens that can be made from the input char stream.
+//
+// Does not include EOF token.
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+ start := b.TokenStartCharIndex
+ stop := b.input.Index()
+ text := b.input.GetTextFromInterval(NewInterval(start, stop))
+ msg := "token recognition error at: '" + text + "'"
+ listener := b.GetErrorListenerDispatch()
+ listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+ return ""
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Recover handles a recognition error. A lexer can normally Match any char
+// in its vocabulary after Matching a token, so here we do the easy thing and
+// just kill a character and hope it all works out. You can instead use the
+// rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+//
+// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
+// a character that makes no sense to the recognizer.
+func (b *BaseLexer) Recover(re RecognitionException) {
+ if b.input.LA(1) != TokenEOF {
+ if _, ok := re.(*LexerNoViableAltException); ok {
+ // Skip a char and try again
+ b.Interpreter.Consume(b.input)
+ } else {
+ // TODO: Do we lose character or line position information?
+ b.input.Consume()
+ }
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
new file mode 100644
index 0000000000..eaa7393e06
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
@@ -0,0 +1,452 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+ // LexerActionTypeChannel represents a [LexerChannelAction] action.
+ LexerActionTypeChannel = 0
+
+ // LexerActionTypeCustom represents a [LexerCustomAction] action.
+ LexerActionTypeCustom = 1
+
+ // LexerActionTypeMode represents a [LexerModeAction] action.
+ LexerActionTypeMode = 2
+
+ // LexerActionTypeMore represents a [LexerMoreAction] action.
+ LexerActionTypeMore = 3
+
+ // LexerActionTypePopMode represents a [LexerPopModeAction] action.
+ LexerActionTypePopMode = 4
+
+ // LexerActionTypePushMode represents a [LexerPushModeAction] action.
+ LexerActionTypePushMode = 5
+
+ // LexerActionTypeSkip represents a [LexerSkipAction] action.
+ LexerActionTypeSkip = 6
+
+ // LexerActionTypeType represents a [LexerTypeAction] action.
+ LexerActionTypeType = 7
+)
+
+type LexerAction interface {
+ getActionType() int
+ getIsPositionDependent() bool
+ execute(lexer Lexer)
+ Hash() int
+ Equals(other LexerAction) bool
+}
+
+type BaseLexerAction struct {
+ actionType int
+ isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+ la := new(BaseLexerAction)
+
+ la.actionType = action
+ la.isPositionDependent = false
+
+ return la
+}
+
+func (b *BaseLexerAction) execute(_ Lexer) {
+ panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+ return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, b.actionType)
+ return murmurFinish(h, 1)
+}
+
+func (b *BaseLexerAction) Equals(other LexerAction) bool {
+ return b.actionType == other.getActionType()
+}
+
+// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
+//
+// The Skip command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE].
+type LexerSkipAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerSkipAction() *LexerSkipAction {
+ la := new(LexerSkipAction)
+ la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
+ return la
+}
+
+// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
+var LexerSkipActionINSTANCE = NewLexerSkipAction()
+
+func (l *LexerSkipAction) execute(lexer Lexer) {
+ lexer.Skip()
+}
+
+// String returns a string representation of the current [LexerSkipAction].
+func (l *LexerSkipAction) String() string {
+ return "skip"
+}
+
+func (b *LexerSkipAction) Equals(other LexerAction) bool {
+ return other.getActionType() == LexerActionTypeSkip
+}
+
+// LexerTypeAction implements the {@code type} lexer action by calling
+// {@link Lexer//setType} with the assigned type.
+type LexerTypeAction struct {
+ *BaseLexerAction
+
+ thetype int
+}
+
+func NewLexerTypeAction(thetype int) *LexerTypeAction {
+ l := new(LexerTypeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
+ l.thetype = thetype
+ return l
+}
+
+func (l *LexerTypeAction) execute(lexer Lexer) {
+ lexer.SetType(l.thetype)
+}
+
+func (l *LexerTypeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.thetype)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerTypeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerTypeAction); !ok {
+ return false
+ } else {
+ return l.thetype == other.(*LexerTypeAction).thetype
+ }
+}
+
+func (l *LexerTypeAction) String() string {
+ return "actionType(" + strconv.Itoa(l.thetype) + ")"
+}
+
+// LexerPushModeAction implements the pushMode lexer action by calling
+// [Lexer.pushMode] with the assigned mode.
+type LexerPushModeAction struct {
+ *BaseLexerAction
+ mode int
+}
+
+func NewLexerPushModeAction(mode int) *LexerPushModeAction {
+
+ l := new(LexerPushModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
+
+ l.mode = mode
+ return l
+}
+
+//
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) {
+ lexer.PushMode(l.mode)
+}
+
+func (l *LexerPushModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerPushModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerPushModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerPushModeAction).mode
+ }
+}
+
+func (l *LexerPushModeAction) String() string {
+ return "pushMode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
+//
+// The popMode command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE]
+type LexerPopModeAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerPopModeAction() *LexerPopModeAction {
+
+ l := new(LexerPopModeAction)
+
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
+
+ return l
+}
+
+var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
+
+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) {
+	lexer.PopMode()
+}
+
+func (l *LexerPopModeAction) String() string {
+	return "popMode"
+}
+
+// LexerMoreAction implements the more lexer action by calling [Lexer.More].
+//
+// The more command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerMoreActionINSTANCE].
+type LexerMoreAction struct {
+	*BaseLexerAction
+}
+
+func NewLexerMoreAction() *LexerMoreAction {
+	l := new(LexerMoreAction)
+	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
+	return l
+}
+
+var LexerMoreActionINSTANCE = NewLexerMoreAction()
+
+// This action is implemented by calling {@link Lexer//more}.
+func (l *LexerMoreAction) execute(lexer Lexer) {
+ lexer.More()
+}
+
+func (l *LexerMoreAction) String() string {
+ return "more"
+}
+
+// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
+// the assigned mode.
+type LexerModeAction struct {
+ *BaseLexerAction
+ mode int
+}
+
+func NewLexerModeAction(mode int) *LexerModeAction {
+ l := new(LexerModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
+ l.mode = mode
+ return l
+}
+
+//
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) {
+ lexer.SetMode(l.mode)
+}
+
+func (l *LexerModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerModeAction).mode
+ }
+}
+
+func (l *LexerModeAction) String() string {
+ return "mode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Executes a custom lexer action by calling {@link Recognizer//action} with the
+// rule and action indexes assigned to the custom action. The implementation of
+// a custom action is added to the generated code for the lexer in an override
+// of {@link Recognizer//action} when the grammar is compiled.
+//
+//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
+
+// Constructs a custom lexer action with the specified rule and action
+// indexes.
+//
+// @param ruleIndex The rule index to use for calls to
+// {@link Recognizer//action}.
+// @param actionIndex The action index to use for calls to
+// {@link Recognizer//action}.
+
+type LexerCustomAction struct {
+ *BaseLexerAction
+ ruleIndex, actionIndex int
+}
+
+func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
+ l := new(LexerCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
+ l.ruleIndex = ruleIndex
+ l.actionIndex = actionIndex
+ l.isPositionDependent = true
+ return l
+}
+
+//
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) {
+ lexer.Action(nil, l.ruleIndex, l.actionIndex)
+}
+
+func (l *LexerCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.ruleIndex)
+ h = murmurUpdate(h, l.actionIndex)
+ return murmurFinish(h, 3)
+}
+
+func (l *LexerCustomAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerCustomAction); !ok {
+ return false
+ } else {
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
+ l.actionIndex == other.(*LexerCustomAction).actionIndex
+ }
+}
+
+// LexerChannelAction implements the channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
+type LexerChannelAction struct {
+ *BaseLexerAction
+ channel int
+}
+
+// NewLexerChannelAction creates a channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
+func NewLexerChannelAction(channel int) *LexerChannelAction {
+ l := new(LexerChannelAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
+ l.channel = channel
+ return l
+}
+
+//
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) {
+ lexer.SetChannel(l.channel)
+}
+
+func (l *LexerChannelAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.channel)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerChannelAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerChannelAction); !ok {
+ return false
+ } else {
+ return l.channel == other.(*LexerChannelAction).channel
+ }
+}
+
+func (l *LexerChannelAction) String() string {
+ return "channel(" + strconv.Itoa(l.channel) + ")"
+}
+
+// This implementation of {@link LexerAction} is used for tracking input offsets
+// for position-dependent actions within a {@link LexerActionExecutor}.
+//
+//
+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+
+type LexerIndexedCustomAction struct {
+ *BaseLexerAction
+ offset int
+ lexerAction LexerAction
+ isPositionDependent bool
+}
+
+// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
+// with a [LexerAction].
+//
+// Note: This class is only required for lexer actions for which
+// [LexerAction.isPositionDependent] returns true.
+//
+// The offset points into the input [CharStream], relative to
+// the token start index, at which the specified lexerAction should be
+// executed.
+func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
+
+ l := new(LexerIndexedCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
+
+ l.offset = offset
+ l.lexerAction = lexerAction
+ l.isPositionDependent = true
+
+ return l
+}
+
+//
+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
+ // assume the input stream position was properly set by the calling code
+ l.lexerAction.execute(lexer)
+}
+
+func (l *LexerIndexedCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.offset)
+ h = murmurUpdate(h, l.lexerAction.Hash())
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerIndexedCustomAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerIndexedCustomAction); !ok {
+ return false
+ } else {
+ return l.offset == other.(*LexerIndexedCustomAction).offset &&
+ l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
new file mode 100644
index 0000000000..dfc28c32b3
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "golang.org/x/exp/slices"
+
+// Represents an executor for a sequence of lexer actions which were traversed during
+// the Matching operation of a lexer rule (token).
+//
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
+
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+ // Caches the result of {@link //hashCode} since the hash code is an element
+ // of the performance-critical {@link ATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(0)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
+ }
+ l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
+
+ return l
+}
+
+// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
+// the input [LexerActionExecutor] followed by a specified
+// [LexerAction].
+// TODO: This does not match the Java code
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
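+
+// An executor typically grows one action at a time as ACTION transitions are
+// traversed (sketch using actions defined in this runtime):
+//
+//	var exec *LexerActionExecutor // nil until the first action is seen
+//	exec = LexerActionExecutorappend(exec, LexerSkipActionINSTANCE)
+//	exec = LexerActionExecutorappend(exec, NewLexerModeAction(1))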
+
+// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// [LexerAction.isPositionDependent] returns true, it calls
+// [IntStream.Seek] on the input [CharStream] to set the input
+// position to the end of the current token. This behavior provides
+// for efficient [DFA] representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the [ATN], the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the [DFA] representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same Length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns this instance.
+//
+// The offset is assigned to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// The func returns a [LexerActionExecutor] that stores input stream offsets
+// for all position-dependent lexer actions.
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
+ updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
+ }
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
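+
+// For example (a sketch; the offset and indexes are illustrative), a
+// position-dependent custom action matched two characters into the token is
+// rewritten so the offset travels with it:
+//
+//	exec := NewLexerActionExecutor([]LexerAction{NewLexerCustomAction(0, 0)})
+//	exec = exec.fixOffsetBeforeMatch(2) // wraps the action in a LexerIndexedCustomAction with offset 2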
+
+// Execute the actions encapsulated by l executor within the context of a
+// particular {@link Lexer}.
+//
+//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+//
+// @param lexer The lexer instance.
+// @param input The input stream which is the source for the current token.
+// When this method is called, the current {@link IntStream//index} for
+// {@code input} should be the start of the following token, i.e. 1
+// character past the end of the current token.
+// @param startIndex The token start index. This value may be passed to
+// {@link IntStream//seek} to set the {@code input} position to the beginning
+// of the token.
+// /
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
+
+func (l *LexerActionExecutor) Hash() int {
+ if l == nil {
+ // TODO: Why is this here? l should not be nil
+ return 61
+ }
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
new file mode 100644
index 0000000000..fe938b0259
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
@@ -0,0 +1,677 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ LexerATNSimulatorMinDFAEdge = 0
+ LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
+
+ LexerATNSimulatorMatchCalls = 0
+)
+
+type ILexerATNSimulator interface {
+ IATNSimulator
+
+ reset()
+ Match(input CharStream, mode int) int
+ GetCharPositionInLine() int
+ GetLine() int
+ GetText(input CharStream) string
+ Consume(input CharStream)
+}
+
+type LexerATNSimulator struct {
+ BaseATNSimulator
+
+ recog Lexer
+ predictionMode int
+ mergeCache *JPCMap2
+ startIndex int
+ Line int
+ CharPositionInLine int
+ mode int
+ prevAccept *SimState
+ MatchCalls int
+}
+
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+ l := &LexerATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ l.decisionToDFA = decisionToDFA
+ l.recog = recog
+
+ // The current token's starting index into the character stream.
+ // Shared across DFA to ATN simulation in case the ATN fails and the
+	// DFA did not have a previous accept state. In this case, we use the
+ // ATN-generated exception object.
+ l.startIndex = -1
+
+ // line number 1..n within the input
+ l.Line = 1
+
+ // The index of the character relative to the beginning of the line
+ // 0..n-1
+ l.CharPositionInLine = 0
+
+ l.mode = LexerDefaultMode
+
+ // Used during DFA/ATN exec to record the most recent accept configuration
+ // info
+ l.prevAccept = NewSimState()
+
+ return l
+}
+
+func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
+ l.CharPositionInLine = simulator.CharPositionInLine
+ l.Line = simulator.Line
+ l.mode = simulator.mode
+ l.startIndex = simulator.startIndex
+}
+
+func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
+ l.MatchCalls++
+ l.mode = mode
+ mark := input.Mark()
+
+ defer func() {
+ input.Release(mark)
+ }()
+
+ l.startIndex = input.Index()
+ l.prevAccept.reset()
+
+ dfa := l.decisionToDFA[mode]
+
+ var s0 *DFAState
+ l.atn.stateMu.RLock()
+ s0 = dfa.getS0()
+ l.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ return l.MatchATN(input)
+ }
+
+ return l.execATN(input, s0)
+}
+
+func (l *LexerATNSimulator) reset() {
+ l.prevAccept.reset()
+ l.startIndex = -1
+ l.Line = 1
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+}
+
+func (l *LexerATNSimulator) MatchATN(input CharStream) int {
+ startState := l.atn.modeToStartState[l.mode]
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
+ }
+ oldMode := l.mode
+ s0Closure := l.computeStartState(input, startState)
+ suppressEdge := s0Closure.hasSemanticContext
+ s0Closure.hasSemanticContext = false
+
+ next := l.addDFAState(s0Closure, suppressEdge)
+
+ predict := l.execATN(input, next)
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
+ }
+ return predict
+}
+
+func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("start state closure=" + ds0.configs.String())
+ }
+ if ds0.isAcceptState {
+ // allow zero-Length tokens
+ l.captureSimState(l.prevAccept, input, ds0)
+ }
+ t := input.LA(1)
+ s := ds0 // s is current/from DFA state
+
+ for { // while more work
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("execATN loop starting closure: " + s.configs.String())
+ }
+
+ // As we move src->trg, src->trg, we keep track of the previous trg to
+ // avoid looking up the DFA state again, which is expensive.
+ // If the previous target was already part of the DFA, we might
+ // be able to avoid doing a reach operation upon t. If s!=nil,
+ // it means that semantic predicates didn't prevent us from
+ // creating a DFA state. Once we know s!=nil, we check to see if
+ // the DFA state has an edge already for t. If so, we can just reuse
+		// its configuration set; there's no point in re-computing it.
+ // This is kind of like doing DFA simulation within the ATN
+ // simulation because DFA simulation is really just a way to avoid
+ // computing reach/closure sets. Technically, once we know that
+ // we have a previously added DFA state, we could jump over to
+ // the DFA simulator. But, that would mean popping back and forth
+ // a lot and making things more complicated algorithmically.
+ // This optimization makes a lot of sense for loops within DFA.
+ // A character will take us back to an existing DFA state
+ // that already has lots of edges out of it. e.g., .* in comments.
+ target := l.getExistingTargetState(s, t)
+ if target == nil {
+ target = l.computeTargetState(input, s, t)
+ // print("Computed:" + str(target))
+ }
+ if target == ATNSimulatorError {
+ break
+ }
+		// If this is a consumable input element, make sure to consume before
+ // capturing the accept state so the input index, line, and char
+ // position accurately reflect the state of the interpreter at the
+ // end of the token.
+ if t != TokenEOF {
+ l.Consume(input)
+ }
+ if target.isAcceptState {
+ l.captureSimState(l.prevAccept, input, target)
+ if t == TokenEOF {
+ break
+ }
+ }
+ t = input.LA(1)
+		s = target // flip: current DFA target becomes new src/from state
+ }
+
+ return l.failOrAccept(l.prevAccept, input, s.configs, t)
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param s The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
+ if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
+ return nil
+ }
+
+ l.atn.edgeMu.RLock()
+ defer l.atn.edgeMu.RUnlock()
+ if s.getEdges() == nil {
+ return nil
+ }
+ target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
+ if runtimeConfig.lexerATNSimulatorDebug && target != nil {
+ fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
+ }
+ return target
+}
+
+// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the
+// computed state and corresponding edge to the [DFA].
+//
+// The func returns the computed target [DFA] state for the given input symbol t.
+// If this does not lead to a valid [DFA] state, this method
+// returns ATNSimulatorError.
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
+ reach := NewOrderedATNConfigSet()
+
+ // if we don't find an existing DFA state
+ // Fill reach starting from closure, following t transitions
+ l.getReachableConfigSet(input, s.configs, reach, t)
+
+ if len(reach.configs) == 0 { // we got nowhere on t from s
+ if !reach.hasSemanticContext {
+			// we got nowhere on t, don't throw out this knowledge; it'd
+			// cause a fail-over from DFA later.
+ l.addDFAEdge(s, t, ATNSimulatorError, nil)
+ }
+ // stop when we can't Match any more char
+ return ATNSimulatorError
+ }
+ // Add an edge from s to target DFA found/created for reach
+ return l.addDFAEdge(s, t, nil, reach)
+}
+
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
+ if l.prevAccept.dfaState != nil {
+ lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
+ l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+ return prevAccept.dfaState.prediction
+ }
+
+ // if no accept and EOF is first char, return EOF
+ if t == TokenEOF && input.Index() == l.startIndex {
+ return TokenEOF
+ }
+
+ panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
+}
+
+// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations
+// we can reach upon input t.
+//
+// Parameter reach is a return parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
+	// this is used to Skip processing for configs which have a lower priority
+	// than a config that already reached an accept state for the same rule
+ SkipAlt := ATNInvalidAltNumber
+
+ for _, cfg := range closure.configs {
+ currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
+ if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
+ continue
+ }
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
+ }
+
+ for _, trans := range cfg.GetState().GetTransitions() {
+ target := l.getReachableTarget(trans, t)
+ if target != nil {
+ lexerActionExecutor := cfg.lexerActionExecutor
+ if lexerActionExecutor != nil {
+ lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
+ }
+ treatEOFAsEpsilon := t == TokenEOF
+ config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
+ if l.closure(input, config, reach,
+ currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
+					// any remaining configs for this alt have a lower priority
+					// than the one that just reached an accept state.
+ SkipAlt = cfg.GetAlt()
+ }
+ }
+ }
+ }
+}
+
+func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Printf("ACTION %v\n", lexerActionExecutor)
+ }
+ // seek to after last char in token
+ input.Seek(index)
+ l.Line = line
+ l.CharPositionInLine = charPos
+ if lexerActionExecutor != nil && l.recog != nil {
+ lexerActionExecutor.execute(l.recog, input, startIndex)
+ }
+}
+
+func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
+ if trans.Matches(t, 0, LexerMaxCharValue) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
+ configs := NewOrderedATNConfigSet()
+ for i := 0; i < len(p.GetTransitions()); i++ {
+ target := p.GetTransitions()[i].getTarget()
+ cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+ l.closure(input, cfg, configs, false, false, false)
+ }
+
+ return configs
+}
+
+// closure since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
+// state is reached. After the first accept state is reached by depth-first
+// search from config, all other (potentially reachable) states for
+// this rule would have a lower priority.
+//
+// The func returns true if an accept state is reached.
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ _, ok := config.state.(*RuleStopState)
+ if ok {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ if l.recog != nil {
+ fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
+ } else {
+ fmt.Printf("closure at rule stop %s\n", config)
+ }
+ }
+
+ if config.context == nil || config.context.hasEmptyPath() {
+ if config.context == nil || config.context.isEmpty() {
+ configs.Add(config, nil)
+ return true
+ }
+
+ configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
+ currentAltReachedAcceptState = true
+ }
+ if config.context != nil && !config.context.isEmpty() {
+ for i := 0; i < config.context.length(); i++ {
+ if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
+ newContext := config.context.GetParent(i) // "pop" return state
+ returnState := l.atn.states[config.context.getReturnState(i)]
+ cfg := NewLexerATNConfig2(config, returnState, newContext)
+ currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ }
+ return currentAltReachedAcceptState
+ }
+ // optimization
+ if !config.state.GetEpsilonOnlyTransitions() {
+ if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
+ configs.Add(config, nil)
+ }
+ }
+ for j := 0; j < len(config.state.GetTransitions()); j++ {
+ trans := config.state.GetTransitions()[j]
+ cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
+ if cfg != nil {
+ currentAltReachedAcceptState = l.closure(input, cfg, configs,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ return currentAltReachedAcceptState
+}
+
+// side-effect: can alter configs.hasSemanticContext
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
+ configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
+
+ var cfg *ATNConfig
+
+ if trans.getSerializationType() == TransitionRULE {
+
+ rt := trans.(*RuleTransition)
+ newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
+ cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
+
+ } else if trans.getSerializationType() == TransitionPRECEDENCE {
+ panic("Precedence predicates are not supported in lexers.")
+ } else if trans.getSerializationType() == TransitionPREDICATE {
+ // Track traversing semantic predicates. If we traverse,
+		// we cannot add a DFA state for this "reach" computation
+ // because the DFA would not test the predicate again in the
+ // future. Rather than creating collections of semantic predicates
+ // like v3 and testing them on prediction, v4 will test them on the
+ // fly all the time using the ATN not the DFA. This is slower but
+ // semantically it's not used that often. One of the key elements to
+		// this predicate mechanism is not adding DFA states that see
+ // predicates immediately afterwards in the ATN. For example,
+
+ // a : ID {p1}? | ID {p2}?
+
+ // should create the start state for rule 'a' (to save start state
+ // competition), but should not create target of ID state. The
+ // collection of ATN states the following ID references includes
+		// states reached by traversing predicates. Since this is when we
+		// test them, we cannot cache the DFA state target of ID.
+
+ pt := trans.(*PredicateTransition)
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
+ }
+ configs.hasSemanticContext = true
+ if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionACTION {
+ if config.context == nil || config.context.hasEmptyPath() {
+ // execute actions anywhere in the start rule for a token.
+ //
+ // TODO: if the entry rule is invoked recursively, some
+ // actions may be executed during the recursive call. The
+ // problem can appear when hasEmptyPath() is true but
+ // isEmpty() is false. In this case, the config needs to be
+ // split into two contexts - one with just the empty path
+ // and another with everything but the empty path.
+ // Unfortunately, the current algorithm does not allow
+ // getEpsilonTarget to return two configurations, so
+ // additional modifications are needed before we can support
+ // the split operation.
+ lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
+ cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
+ } else {
+ // ignore actions in referenced rules
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionEPSILON {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ } else if trans.getSerializationType() == TransitionATOM ||
+ trans.getSerializationType() == TransitionRANGE ||
+ trans.getSerializationType() == TransitionSET {
+ if treatEOFAsEpsilon {
+ if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ }
+ }
+ return cfg
+}
+
+// evaluatePredicate evaluates a predicate specified in the lexer.
+//
+// If speculative is true, this method was called before
+// [Consume] for the Matched character. This method should call
+// [Consume] before evaluating the predicate to ensure position
+// sensitive values, including [GetText], [GetLine],
+// and [GetColumn], properly reflect the current
+// lexer state. This method should restore input and the simulator
+// to the original state before returning, i.e. undo the actions made by the
+// call to [Consume].
+//
+// The func returns true if the specified predicate evaluates to true.
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
+ // assume true if no recognizer was provided
+ if l.recog == nil {
+ return true
+ }
+ if !speculative {
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+ }
+ savedcolumn := l.CharPositionInLine
+ savedLine := l.Line
+ index := input.Index()
+ marker := input.Mark()
+
+ defer func() {
+ l.CharPositionInLine = savedcolumn
+ l.Line = savedLine
+ input.Seek(index)
+ input.Release(marker)
+ }()
+
+ l.Consume(input)
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+}
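+
+// Illustrative sketch (not part of the runtime): a lexer rule with a
+// position-sensitive predicate, e.g.
+//
+//	END : 'end' {p.GetCharPositionInLine() == 0}? ;
+//
+// depends on Consume having updated the line/column state before Sempred
+// runs, which is why the speculative path above calls Consume first and the
+// deferred function restores the original input and simulator state.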
+
+func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
+ settings.index = input.Index()
+ settings.line = l.Line
+ settings.column = l.CharPositionInLine
+ settings.dfaState = dfaState
+}
+
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
+ if to == nil && cfgs != nil {
+ // leading to this call, ATNConfigSet.hasSemanticContext is used as a
+ // marker indicating dynamic predicate evaluation makes this edge
+ // dependent on the specific input sequence, so the static edge in the
+ // DFA should be omitted. The target DFAState is still created since
+ // execATN has the ability to resynchronize with the DFA state cache
+ // following the predicate evaluation step.
+ //
+ // TJP notes: next time through the DFA, we see a pred again and eval.
+ // If that gets us to a previously created (but dangling) DFA
+ // state, we can continue in pure DFA mode from there.
+ //
+ suppressEdge := cfgs.hasSemanticContext
+ cfgs.hasSemanticContext = false
+ to = l.addDFAState(cfgs, true)
+
+ if suppressEdge {
+ return to
+ }
+ }
+ // add the edge
+ if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
+ // Only track edges within the DFA bounds
+ return to
+ }
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
+ }
+ l.atn.edgeMu.Lock()
+ defer l.atn.edgeMu.Unlock()
+ if from.getEdges() == nil {
+ // make room for tokens 1..n and -1 masquerading as index 0
+ from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
+ }
+ from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
+
+ return to
+}
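+
+// Descriptive note: edges are stored in a dense slice indexed by
+// tk-LexerATNSimulatorMinDFAEdge, so edge lookup during Matching is a
+// constant-time slice access rather than a map probe; inputs outside the
+// [MinDFAEdge, MaxDFAEdge] range are simply never cached.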
+
+// addDFAState adds a new DFA state if there isn't one with this set of
+// configurations already. This method also detects the first
+// configuration containing an ATN rule stop state. Later, when
+// traversing the DFA, we will know which rule to accept.
+func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
+
+ proposed := NewDFAState(-1, configs)
+ var firstConfigWithRuleStopState *ATNConfig
+
+ for _, cfg := range configs.configs {
+ _, ok := cfg.GetState().(*RuleStopState)
+
+ if ok {
+ firstConfigWithRuleStopState = cfg
+ break
+ }
+ }
+ if firstConfigWithRuleStopState != nil {
+ proposed.isAcceptState = true
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
+ proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
+ }
+ dfa := l.decisionToDFA[l.mode]
+
+ l.atn.stateMu.Lock()
+ defer l.atn.stateMu.Unlock()
+ existing, present := dfa.Get(proposed)
+ if present {
+
+ // This state was already present, so just return it.
+ //
+ proposed = existing
+ } else {
+
+ // We need to add the new state
+ //
+ proposed.stateNumber = dfa.Len()
+ configs.readOnly = true
+ configs.configLookup = nil // Not needed now
+ proposed.configs = configs
+ dfa.Put(proposed)
+ }
+ if !suppressEdge {
+ dfa.setS0(proposed)
+ }
+ return proposed
+}
+
+func (l *LexerATNSimulator) getDFA(mode int) *DFA {
+ return l.decisionToDFA[mode]
+}
+
+// GetText returns the text [Match]ed so far for the current token.
+func (l *LexerATNSimulator) GetText(input CharStream) string {
+ // index is first lookahead char, don't include.
+ return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
+}
+
+func (l *LexerATNSimulator) Consume(input CharStream) {
+ curChar := input.LA(1)
+ if curChar == int('\n') {
+ l.Line++
+ l.CharPositionInLine = 0
+ } else {
+ l.CharPositionInLine++
+ }
+ input.Consume()
+}
+
+func (l *LexerATNSimulator) GetCharPositionInLine() int {
+ return l.CharPositionInLine
+}
+
+func (l *LexerATNSimulator) GetLine() int {
+ return l.Line
+}
+
+func (l *LexerATNSimulator) GetTokenName(tt int) string {
+ if tt == -1 {
+ return "EOF"
+ }
+
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(tt))
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
+
+func resetSimState(sim *SimState) {
+ sim.index = -1
+ sim.line = 0
+ sim.column = -1
+ sim.dfaState = nil
+}
+
+type SimState struct {
+ index int
+ line int
+ column int
+ dfaState *DFAState
+}
+
+func NewSimState() *SimState {
+ s := new(SimState)
+ resetSimState(s)
+ return s
+}
+
+func (s *SimState) reset() {
+ resetSimState(s)
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
new file mode 100644
index 0000000000..4955ac876f
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
@@ -0,0 +1,218 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+const (
+ // LL1AnalyzerHitPred is a special value added to the lookahead sets to
+ // indicate that we hit a predicate during analysis if seeThruPreds==false.
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+// getDecisionLookahead calculates the SLL(1) expected lookahead set for each outgoing transition
+// of an {@link ATNState}. The returned array has one element for each
+// outgoing transition in {@code s}. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be {@code nil}.
+//
+// @param s the ATN state
+// @return the expected symbols for each outgoing transition of {@code s}.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+
+ look[alt] = NewIntervalSet()
+ lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
+
+ // Wipe out lookahead for this alternative if we found nothing,
+ // or we had a predicate when seeThruPreds was false
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+// Look computes the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+//
+// If ctx is nil and the end of the rule containing
+// s is reached, [EPSILON] is added to the result set.
+//
+// If ctx is not nil and the end of the outermost rule is
+// reached, [EOF] is added to the result set.
+//
+// Parameter s is the ATN state, and stopState is the ATN state to stop at. This can be a
+// [BlockEndState] to detect epsilon paths through a closure.
+//
+// Parameter ctx is the complete parser context, or nil if the context
+// should be ignored
+//
+// The func returns the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ var lookContext *PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
+ NewBitSet(), true, true)
+ return r
+}
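+
+// Illustrative sketch (caller names assumed, not part of this file): an error
+// strategy can compute the tokens that may follow the parser's current state:
+//
+//	la := NewLL1Analyzer(atn)
+//	follow := la.Look(atn.states[parser.GetState()], nil, parser.GetParserRuleContext())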
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state.
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx The outer context, or {@code nil} if the outer context should
+// not be used.
+// @param look The result lookahead set.
+// @param lookBusy A set used for preventing epsilon closures in the ATN
+// from causing a stack overflow. Outside code should pass
+// {@code NewSet} for this argument.
+// @param calledRuleStack A set used for preventing left recursion in the
+// ATN from causing a stack overflow. Outside code should pass
+// {@code NewBitSet()} for this argument.
+// @param seeThruPreds {@code true} to treat semantic predicates as
+// implicitly {@code true} and "see through them", otherwise {@code false}
+// to treat semantic predicates as opaque and add {@link //HitPred} to the
+// result if one is encountered.
+// @param addEOF Add {@link Token//EOF} to the result if the end of the
+// outermost context is reached. This parameter has no effect if {@code ctx}
+// is {@code nil}.
+
+func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ _, present := lookBusy.Put(c)
+ if present {
+ return
+
+ }
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx.pcType != PredictionContextEmpty {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
new file mode 100644
index 0000000000..923c7b52c4
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
@@ -0,0 +1,47 @@
+//go:build !antlr.stats
+
+package antlr
+
+// This file is compiled when the build configuration antlr.stats is not enabled,
+// which then allows the compiler to optimize out all the code that is not used.
+const collectStats = false
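+
+// As a usage sketch (assuming the standard Go toolchain), the statistics
+// variant of this code is selected by building with the tag:
+//
+//	go build -tags antlr.stats ./...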
+
+// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
+type goRunStats struct {
+}
+
+var Statistics = &goRunStats{}
+
+func (s *goRunStats) AddJStatRec(_ *JStatRec) {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) CollectionAnomalies() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Reset() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Report(dir string, prefix string) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func (s *goRunStats) Analyze() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+type statsOption func(*goRunStats) error
+
+func (s *goRunStats) Configure(options ...statsOption) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ return nil
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser.go
new file mode 100644
index 0000000000..fb57ac15db
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser.go
@@ -0,0 +1,700 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// NewBaseParser creates a new BaseParser, which contains all the parsing
+// support code to embed in parsers. Essentially most of it is error recovery
+// stuff.
+//
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+
+ // The error handling strategy for the parser. The default value is a new
+ // instance of {@link DefaultErrorStrategy}.
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+
+ // The ParserRuleContext object for the currently executing rule.
+ // It is always non-nil during the parsing process.
+ p.ctx = nil
+
+ // Specifies whether the parser should construct a parse tree during
+ // the parsing process. The default value is {@code true}.
+ p.BuildParseTrees = true
+
+ // When setTrace(true) is called, a reference to the
+ // TraceListener is stored here, so it can be easily removed in a
+ // later call to setTrace(false). The listener itself is
+ // implemented as a parser listener so this field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+
+ // The list of ParseTreeListener listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+
+ // The number of syntax errors Reported during parsing. This value is
+ // incremented each time NotifyErrorListeners is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// This field maps from the serialized ATN string to the deserialized [ATN] with
+// bypass alternatives.
+//
+// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
+//
+//goland:noinspection GoUnusedGlobalVariable
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match current input symbol against {@code ttype}. If the symbol type
+// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
+// called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @param ttype the token type to Match
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// {@code ttype} and the error strategy could not recover from the
+// mismatched symbol
+
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.HasError() {
+ return nil
+ }
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
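+
+// Illustrative sketch: generated rule code Matches a concrete token type with
+// a call such as
+//
+//	p.Match(MyParserSEMI)
+//
+// where MyParserSEMI is an assumed generated token-type constant.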
+
+// Match current input symbol as a wildcard. If the symbol type Matches
+// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
+// and {@link //consume} are called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// a wildcard and the error strategy could not recover from the mismatched
+// symbol
+
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
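+
+// Illustrative sketch: MatchWildcard is what generated code calls for a '.'
+// element in a parser rule, e.g. for a rule such as
+//
+//	anyToken : . ;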
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// AddParseListener registers listener to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parser. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
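+
+// For example (an illustrative sketch, assuming MyListener is a user-defined
+// type implementing ParseTreeListener):
+//
+//	parser.AddParseListener(&MyListener{})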
+
+// RemoveParseListener removes listener from the list of parse listeners.
+//
+// If listener is nil or has not been added as a parse
+// listener, this func does nothing.
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
+// lazily.
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO - Implement this?
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// The preferred method of getting a tree pattern. For example, here's a
+// sample use:
+//
+//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// SetTokenStream installs input as the token stream and resets the parser.
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// GetCurrentToken returns the current token at LT(1).
+//
+// [Match] needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have a new localctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// GetPrecedence returns the precedence level for the topmost precedence rule,
+// or -1 if the parser context is not nested within a precedence rule.
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+// PushNewRecursionContext is like {@link //EnterRule} but for recursive rules.
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ _, _ = p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
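+
+// Illustrative sketch: generated code for a left-recursive rule typically
+// guards each loop alternative with a precedence check such as
+//
+//	if !(p.Precpred(p.GetParserRuleContext(), 2)) {
+//		// handle the failed predicate
+//	}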
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+// IsExpectedToken checks whether symbol can follow the current state in the
+// [ATN]. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+// return getExpectedTokens().contains(symbol)
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// GetExpectedTokens returns the set of input symbols which could follow the current parser
+// state and context, as given by [GetState] and [GetContext],
+// respectively.
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
+
+// GetRuleIndex gets a rule's index (i.e., RULE_ruleName field) or -1 if not found.
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
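+
+// Illustrative sketch: an error listener might log the invocation stack as
+//
+//	fmt.Println(strings.Join(p.GetRuleInvocationStack(nil), " <- "))
+//
+// (the strings import is assumed on the caller's side).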
+
+// GetDFAStrings returns a string representation of all DFA states, for debugging purposes
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// DumpDFA prints the whole of the DFA for debugging
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.Len() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// SetTrace installs a trace listener for the parse.
+//
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
new file mode 100644
index 0000000000..ae2869692a
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
@@ -0,0 +1,1668 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
+// a standard JStore so that we can use Lazy instantiation of the JStore, mostly
+// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
+type ClosureBusy struct {
+ bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
+ desc string
+}
+
+// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
+func NewClosureBusy(desc string) *ClosureBusy {
+ return &ClosureBusy{
+ desc: desc,
+ }
+}
+
+func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
+ if c.bMap == nil {
+ c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
+ }
+ return c.bMap.Put(config)
+}
+
+type ParserATNSimulator struct {
+ BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *JPCMap
+ outerContext ParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := &ParserATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambig detection?
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. [JPCMap]
+ // isn't Synchronized, but we're ok since two threads shouldn't reuse same
+ // parser/atn-simulator object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a) -> c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
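+
+// Illustrative sketch: a generated parser typically wires the simulator up
+// roughly as
+//
+//	p.Interpreter = NewParserATNSimulator(p, atn, decisionToDFA, sharedContextCache)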
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
+
+func (p *ParserATNSimulator) reset() {
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // whack cache after each prediction
+ // Do not attempt to run a GC now that we're done with the cache as makes the
+ // GC overhead terrible for badly formed grammars and has little effect on well formed
+ // grammars.
+ // I have made some extra effort to try and reduce memory pressure by reusing allocations when
+ // possible. However, it can only have a limited effect. The real solution is to encourage grammar
+ // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
+ // what is happening at runtime, along with using the error listener to report ambiguities.
+
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+ // If this is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt, re := p.execATN(dfa, s0, input, index, outerContext)
+ parser.SetError(re)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
+
+// execATN performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+//
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+//
+// - If the set is empty, there is no viable alternative for current symbol
+// - Does the state uniquely predict an alternative?
+// - Does the state have a conflict that would prevent us from
+// putting it on the work list?
+//
+// We also have some key operations to do:
+//
+// - Add an edge from previous DFA state to potentially NewDFA state, D,
+// - Upon current symbol but only if adding to work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// - Collecting predicates and adding semantic context to DFA accept states
+// - adding rule context to context-sensitive DFA accept states
+// - Consuming an input symbol
+// - Reporting a conflict
+// - Reporting an ambiguity
+// - Reporting a context sensitivity
+// - Reporting insufficient predicates
+//
+// Cover these cases:
+//
+// - dead end
+// - single alt
+// - single alt + predicates
+// - conflict
+// - conflict + predicates
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ p.parser.SetError(e)
+ return ATNInvalidAltNumber, e
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.conflictingAlts
+ if D.predicates != nil {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue(), nil
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt, re
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction, nil
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ case 1:
+ return alts.minValue(), nil
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue(), nil
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param previousD The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
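+
+// Descriptive note: the t+1 offset used above shifts TokenEOF (-1) to slot 0
+// so that all valid symbols can be stored in a dense edge slice.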
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param dfa The DFA
+// @param previousD The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+ // create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.uniqueAlt = predictedAlt
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.requiresFullContext = true
+ // in SLL-only mode, we will stop at this state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.conflictingAlts.minValue())
+ }
+ if D.isAcceptState && D.configs.hasSemanticContext {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach *ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.uniqueAlt = p.getUniqueAlt(reach)
+ // unique prediction?
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ predictedAlt = reach.uniqueAlt
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+ // It just keeps scarfing until we know what the conflict is
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt, nil
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have non-conflicting alt subsets as in:
+ //
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+ //
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+ // In this case, (17,1,[5 $]) indicates there is some next sequence that
+ // would resolve this without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt, nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+ if p.mergeCache == nil {
+ p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ }
+ intermediate := NewATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+	// For full-context reach operations, separate handling is required to
+	// ensure that the alternative matching the longest overall sequence is
+	// chosen when multiple such configurations can match the input.
+
+ var skippedStopStates []*ATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.configs {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach *ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+	// contains all configurations relevant to the reach set, but this
+ // condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+			// Don't pursue the closure if there is just one state.
+			// It can only have one alternative; just add it to the result.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewATNConfigSet(fullCtx)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+		// this case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+		// already guaranteed to meet this condition whether or not it is
+		// required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+	// chooses an alternative matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
+ if len(reach.configs) == 0 {
+ return nil
+ }
+
+ return reach
+}
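Stripped of caches, stop-state bookkeeping, and the closure step, the core "reach on symbol t" operation above is just a move over transitions. A toy, self-contained illustration; all types here are invented for the sketch.

```go
package main

import "fmt"

type toyTransition struct {
	symbol rune
	target int
}

// reach returns the states reachable from 'states' on input symbol t.
func reach(states map[int]bool, edges map[int][]toyTransition, t rune) map[int]bool {
	out := make(map[int]bool)
	for s := range states {
		for _, tr := range edges[s] {
			if tr.symbol == t {
				out[tr.target] = true
			}
		}
	}
	return out
}

func main() {
	edges := map[int][]toyTransition{
		0: {{'a', 1}, {'a', 2}},
		1: {{'b', 3}},
	}
	fmt.Println(reach(map[int]bool{0: true}, edges, 'a')) // map[1:true 2:true]
}
```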
+
+// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
+// configs which are in a [RuleStopState]. If all
+// configurations in configs are already in a rule stop state, this
+// method simply returns configs.
+//
+// When lookToEndOfRule is true, this method uses
+// [ATN].[NextTokens] for each configuration in configs which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// The func returns configs if all configurations in configs are in a
+// rule stop state, otherwise it returns a new configuration set containing only
+// the configurations from configs which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewATNConfigSet(configs.fullCtx)
+ for _, config := range configs.configs {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewATNConfigSet(fullCtx)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewATNConfig6(target, i+1, initialContext)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+// applyPrecedenceFilter transforms the start state computed by
+// [computeStartState] to the special start state used by a
+// precedence [DFA] for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+// 1. Evaluate the precedence predicates for each configuration using
+// [SemanticContext].evalPrecedence.
+// 2. Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context.
+//
+// Transformation 2 is valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+// In the above grammar, the [ATN] state immediately before the token
+// reference 'a' in letterA is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// statement. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to prog, and then back in to statement
+// from being eliminated by the filter.
+//
+// The func returns the transformed configuration set representing the start state
+// for a precedence [DFA] at a particular precedence level (determined by
+// calling [Parser].getPrecedence).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]*PredictionContext)
+ configSet := NewATNConfigSet(configs.fullCtx)
+
+ for _, config := range configs.configs {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.configs {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+		// In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.Equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
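Transformation 2 above reduces, in essence, to dropping every alt>1 configuration whose (state, context) pair is duplicated by an alt-1 configuration. A toy sketch with invented types; the real filter also evaluates precedence predicates and honors the suppression flag.

```go
package main

import "fmt"

type cfg struct {
	state int
	alt   int
	ctx   string // stand-in for a prediction context
}

// precedenceFilter keeps alt-1 configurations and drops any alt>1
// configuration that an alt-1 configuration already covers exactly.
func precedenceFilter(in []cfg) []cfg {
	alt1Ctx := make(map[int]string)
	for _, c := range in {
		if c.alt == 1 {
			alt1Ctx[c.state] = c.ctx
		}
	}
	var out []cfg
	for _, c := range in {
		if c.alt > 1 {
			if ctx, ok := alt1Ctx[c.state]; ok && ctx == c.ctx {
				continue // suppressed by the alt-1 configuration
			}
		}
		out = append(out, c)
	}
	return out
}

func main() {
	fmt.Println(precedenceFilter([]cfg{{5, 1, "$"}, {5, 2, "$"}, {5, 2, "7 $"}}))
	// [{5 1 $} {5 2 7 $}]: the exact duplicate of alt 1 is removed
}
```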
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.configs {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // unambiguous alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // un-predicated is indicated by SemanticContextNONE
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a [NoViableAltException] in particular prediction scenarios where the
+// Error state was reached during [ATN] simulation.
+//
+// The default implementation of this method uses the following
+// algorithm to identify an [ATN] configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// [ParserRuleContext] returned by the calling rule will be complete
+// and valid, and the syntax error will be reported later at a more
+// localized location.
+//
+// - If a syntactically valid path or paths reach the end of the decision rule, and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if one or more semantically invalid but syntactically valid
+// paths exist, return the minimum associated alt.
+// - Otherwise, return [ATNInvalidAltNumber].
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a [FailedPredicateException] in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// [AdaptivePredict] instead of panicking with a [NoViableAltException], the resulting
+// [FailedPredicateException] in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+// Pass in configs holding the ATN configurations which were valid immediately before
+// the ERROR state was reached, and outerContext as the initial parser context from the paper,
+// i.e. the parser stack at the instant before prediction commences.
+//
+// The func returns the value to return from [AdaptivePredict], or
+// [ATNInvalidAltNumber] if a suitable alternative was not
+// identified and [AdaptivePredict] should report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.configs) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.configs {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+type ATNConfigSetPair struct {
+	item0, item1 *ATNConfigSet
+}
+
+// splitAccordingToSemanticValidity walks the list of configurations and splits them according to
+// those that have preds evaluating to true/false. If there is no pred, it assumes a
+// true pred and includes the configuration in the succeeded set. It returns a pair of
+// sets: [succeeded, failed].
+//
+// It creates a new set so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point of
+// prediction, which is where predicates need to evaluate.
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
+ succeeded := NewATNConfigSet(configs.fullCtx)
+ failed := NewATNConfigSet(configs.fullCtx)
+
+ for _, c := range configs.configs {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []*ATNConfigSet{succeeded, failed}
+}
+
+// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
+// un-predicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
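The winner-selection loop above can be summarized with plain predicates. This invented-type sketch mirrors the complete/!complete behavior, treating a nil predicate like SemanticContextNone ("always true").

```go
package main

import "fmt"

type predPair struct {
	pred func() bool // nil means "no predicate": always true
	alt  int
}

// evalPairs collects alts whose predicate wins and stops at the first
// winner unless 'complete' is set.
func evalPairs(pairs []predPair, complete bool) []int {
	var winners []int
	for _, p := range pairs {
		if p.pred == nil || p.pred() {
			winners = append(winners, p.alt)
			if !complete {
				break
			}
		}
	}
	return winners
}

func main() {
	pairs := []predPair{
		{func() bool { return false }, 1},
		{nil, 2},
		{func() bool { return true }, 3},
	}
	fmt.Println(evalPairs(pairs, false)) // [2]
	fmt.Println(evalPairs(pairs, true))  // [2 3]
}
```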
+
+func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ var stack []*ATNConfig
+ visited := make(map[*ATNConfig]bool)
+
+ stack = append(stack, config)
+
+ for len(stack) > 0 {
+ currConfig := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+
+ if _, ok := visited[currConfig]; ok {
+ continue
+ }
+ visited[currConfig] = true
+
+ if _, ok := currConfig.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !currConfig.GetContext().isEmpty() {
+ for i := 0; i < currConfig.GetContext().length(); i++ {
+ if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
+ newContext := currConfig.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
+ // While we have context to pop back from, we may have
+				// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
+
+ stack = append(stack, c)
+ }
+ continue
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(currConfig, p.mergeCache)
+ continue
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ }
+ }
+
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+}
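closureCheckingStopState above is the iterative form of closureCheckingStopStateRecursive below: recursion is replaced by an explicit stack plus a visited set, which keeps deep prediction contexts from growing the call stack. The bare pattern, with an invented node type:

```go
package main

import "fmt"

type node struct {
	id   int
	next []*node
}

// visit walks a graph with an explicit stack instead of recursion,
// mirroring the worklist structure used above.
func visit(root *node, f func(*node)) {
	stack := []*node{root}
	seen := map[*node]bool{}
	for len(stack) > 0 {
		n := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if seen[n] {
			continue
		}
		seen[n] = true
		f(n)
		stack = append(stack, n.next...)
	}
}

func main() {
	b := &node{id: 2}
	a := &node{id: 1, next: []*node{b, b}}
	visit(a, func(n *node) { fmt.Println(n.id) }) // prints 1 then 2; b is visited once
}
```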
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+			// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if c != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+				// target fell off end of rule; mark resulting c as having dipped into outer context.
+ // We can't get here if incoming config was rule stop and we had context
+ // track how far we dip into outer context. Might
+ // come in handy and we avoid evaluating context dependent
+ // preds if this is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+ configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
+ if !runtimeConfig.lRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW so we can't perform optimization
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+	// that _p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', (' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+		// which points to _p, but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+ sb.WriteString("')
+ return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewATNConfig4(config, t.getTarget())
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("runtimeConfig from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
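getConflictingAltSubsets, used above, groups configurations by (state, context) and collects the alternatives predicted in each group. A toy version with invented types, using the DFA state from the comment below:

```go
package main

import "fmt"

type conf struct {
	state int
	ctx   string // stand-in for a prediction context
	alt   int
}

// conflictingAltSubsets groups configurations by (state, ctx) and
// returns the set of alternatives predicted by each group.
func conflictingAltSubsets(configs []conf) []map[int]bool {
	type key struct {
		state int
		ctx   string
	}
	groups := map[key]map[int]bool{}
	var order []key
	for _, c := range configs {
		k := key{c.state, c.ctx}
		if groups[k] == nil {
			groups[k] = map[int]bool{}
			order = append(order, k)
		}
		groups[k][c.alt] = true
	}
	out := make([]map[int]bool, 0, len(order))
	for _, k := range order {
		out = append(out, groups[k])
	}
	return out
}

func main() {
	// The DFA state [12|1|[], 6|2|[], 12|2|[]] discussed below:
	fmt.Println(conflictingAltSubsets([]conf{
		{12, "[]", 1}, {6, "[]", 2}, {12, "[]", 2},
	})) // [map[1:true 2:true] map[2:true]]
}
```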
+
+// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ;, it has a [DFA]
+// state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]].
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via
+//
+// [6|2|[]].
+//
+// The key is that we have a single state that has config's only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd non-conflicting configuration for a different alternative:
+//
+// [1|1|[], 1|2|[], 8|3|[]].
+//
+// This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, so we do not
+// stop working on this state.
+//
+// In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.uniqueAlt != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.uniqueAlt)
+ } else {
+ conflictingAlts = configs.conflictingAlts
+ }
+ return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in [AdaptivePredict] around [execATN], but I cut
+// it out for clarity now that the algorithm works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+	// for i := 0; i < len(decs); i++ {
+	//	c := decs[i]
+	//	var trans = "no edges"
+	//	if len(c.state.GetTransitions()) > 0 {
+ // var t = c.state.GetTransitions()[0]
+ // if t2, ok := t.(*AtomTransition); ok {
+ // trans = "Atom "+ p.GetTokenName(t2.label)
+ // } else if t3, ok := t.(SetTransition); ok {
+ // _, ok := t.(*NotSetTransition)
+ //
+ // var s string
+ // if (ok){
+ // s = "~"
+ // }
+ //
+ // trans = s + "Set " + t3.set
+ // }
+ // }
+ // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+ // }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.configs {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
+
+// Add an edge to the DFA, if possible. This method calls
+// {@link //addDFAState} to ensure the {@code to} state is present in the
+// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+//
+// @param dfa The DFA
+// @param from The source state for the edge
+// @param t The input symbol
+// @param to The target state for the edge
+//
+// @return If {@code to} is {@code nil}, this method returns {@code nil};
+// otherwise this method returns the result of calling {@link //addDFAState}
+// on {@code to}
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ p.atn.stateMu.Lock()
+	to = p.addDFAState(dfa, to) // use the existing state if possible, not the incoming one
+ p.atn.stateMu.Unlock()
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ p.atn.edgeMu.Lock()
+ if from.getEdges() == nil {
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
+ from.setIthEdge(t+1, to) // connect
+ p.atn.edgeMu.Unlock()
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
+
+// addDFAState adds state D to the [DFA] if it is not already present, and returns
+// the actual instance stored in the [DFA]. If a state equivalent to D
+// is already in the [DFA], the existing state is returned. Otherwise, this
+// method returns D after adding it to the [DFA].
+//
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
+// does not change the DFA.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+
+ existing, present := dfa.Get(d)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
+ return existing
+ }
+
+ // The state will be added if not already there or we will be given back the existing state struct
+ // if it is present.
+ //
+ d.stateNumber = dfa.Len()
+ if !d.configs.readOnly {
+ d.configs.OptimizeConfigs(&p.BaseATNSimulator)
+ d.configs.readOnly = true
+ d.configs.configLookup = nil
+ }
+ dfa.Put(d)
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
+ }
+
+ return d
+}
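addDFAState is the classic intern (add-or-return-existing) pattern, here keyed by the equality of the state's configuration sets. The bare pattern with invented types:

```go
package main

import "fmt"

type dfaState struct {
	key string // stand-in for configs-based equality
	num int
}

type dfa struct {
	states map[string]*dfaState
}

// put interns d: it returns the existing equivalent state if present,
// otherwise it numbers d, stores it, and returns it.
func (g *dfa) put(d *dfaState) *dfaState {
	if existing, ok := g.states[d.key]; ok {
		return existing
	}
	d.num = len(g.states)
	g.states[d.key] = d
	return d
}

func main() {
	g := &dfa{states: map[string]*dfaState{}}
	a := g.put(&dfaState{key: "A"})
	b := g.put(&dfaState{key: "A"}) // returns the interned instance
	fmt.Println(a == b, a.num)      // true 0
}
```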
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// ReportAmbiguity reports an ambiguity in the parse, which shows that the parser will explore a different route.
+//
+// If we are doing context-sensitive parsing, we know it's an ambiguity, not a conflict or error, but we can report it to the developer
+// so that they can see that this is happening and can take action if they want to.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
new file mode 100644
index 0000000000..c249bc1385
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
@@ -0,0 +1,421 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "reflect"
+ "strconv"
+)
+
+type ParserRuleContext interface {
+ RuleContext
+
+ SetException(RecognitionException)
+
+ AddTokenNode(token Token) *TerminalNodeImpl
+ AddErrorNode(badToken Token) *ErrorNodeImpl
+
+ EnterRule(listener ParseTreeListener)
+ ExitRule(listener ParseTreeListener)
+
+ SetStart(Token)
+ GetStart() Token
+
+ SetStop(Token)
+ GetStop() Token
+
+ AddChild(child RuleContext) RuleContext
+ RemoveLastChild()
+}
+
+type BaseParserRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+
+ start, stop Token
+ exception RecognitionException
+ children []Tree
+}
+
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+ prc := new(BaseParserRuleContext)
+ InitBaseParserRuleContext(prc, parent, invokingStateNumber)
+ return prc
+}
+
+func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
+	// What context invoked this rule?
+ prc.parentCtx = parent
+
+	// What state invoked the rule associated with this context?
+	// The "return address" is the followState of invokingState.
+	// If parent is nil, this should be -1.
+ if parent == nil {
+ prc.invokingState = -1
+ } else {
+ prc.invokingState = invokingStateNumber
+ }
+
+ prc.RuleIndex = -1
+	// If we are debugging or building a parse tree for a visitor,
+	// we need to track all of the tokens and rule invocations associated
+	// with this rule's context. This is empty for parsing without tree
+	// construction because we don't need to track the details about
+	// how we parse this rule.
+ prc.children = nil
+ prc.start = nil
+ prc.stop = nil
+	// The exception that forced this rule to return. If the rule successfully
+	// completed, this is nil.
+ prc.exception = nil
+}
+
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+ prc.exception = e
+}
+
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+ return prc.children
+}
+
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+ // from RuleContext
+ prc.parentCtx = ctx.parentCtx
+ prc.invokingState = ctx.invokingState
+ prc.children = nil
+ prc.start = ctx.start
+ prc.stop = ctx.stop
+}
+
+func (prc *BaseParserRuleContext) GetText() string {
+ if prc.GetChildCount() == 0 {
+ return ""
+ }
+
+ var s string
+ for _, child := range prc.children {
+ s += child.(ParseTree).GetText()
+ }
+
+ return s
+}
+
+// EnterRule is called when any rule is entered.
+func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
+}
+
+// ExitRule is called when any rule is exited.
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
+}
+
+// addTerminalNodeChild does not set the parent link; other add methods do that.
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
+// we entered a rule. If we have a label, we will need to remove
+// the generic ruleContext object.
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+ if prc.children != nil && len(prc.children) > 0 {
+ prc.children = prc.children[0 : len(prc.children)-1]
+ }
+}
+
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+ node := NewTerminalNodeImpl(token)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+
+}
+
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+ node := NewErrorNodeImpl(badToken)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+}
+
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+ if prc.children != nil && len(prc.children) >= i {
+ return prc.children[i]
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+ if childType == nil {
+ return prc.GetChild(i).(RuleContext)
+ }
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if reflect.TypeOf(child) == childType {
+ if i == 0 {
+ return child.(RuleContext)
+ }
+
+ i--
+ }
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+ return TreesStringTree(prc, ruleNames, recog)
+}
+
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+ return visitor.VisitChildren(prc)
+}
+
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+ prc.start = t
+}
+
+func (prc *BaseParserRuleContext) GetStart() Token {
+ return prc.start
+}
+
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+ prc.stop = t
+}
+
+func (prc *BaseParserRuleContext) GetStop() Token {
+ return prc.stop
+}
+
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if c2, ok := child.(TerminalNode); ok {
+ if c2.GetSymbol().GetTokenType() == ttype {
+ if i == 0 {
+ return c2
+ }
+
+ i--
+ }
+ }
+ }
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+ if prc.children == nil {
+ return make([]TerminalNode, 0)
+ }
+
+ tokens := make([]TerminalNode, 0)
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if tchild, ok := child.(TerminalNode); ok {
+ if tchild.GetSymbol().GetTokenType() == ttype {
+ tokens = append(tokens, tchild)
+ }
+ }
+ }
+
+ return tokens
+}
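The child-access methods above are enough to walk a parse tree generically. A hedged sketch follows; building a token by hand with a zero-value TokenSourceCharStreamPair is only for illustration, since real trees come from a generated parser.

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// terminalTexts collects the text of every TerminalNode under t, using
// only the generic Tree child-access methods.
func terminalTexts(t antlr.Tree) []string {
	if term, ok := t.(antlr.TerminalNode); ok {
		return []string{term.GetText()}
	}
	var out []string
	for i := 0; i < t.GetChildCount(); i++ {
		out = append(out, terminalTexts(t.GetChild(i))...)
	}
	return out
}

func main() {
	// Build a tiny tree by hand; in real use the context comes from a parser.
	ctx := antlr.NewBaseParserRuleContext(nil, -1)
	tok := antlr.NewCommonToken(&antlr.TokenSourceCharStreamPair{}, 1, antlr.TokenDefaultChannel, 0, 1)
	tok.SetText("hi")
	ctx.AddTokenNode(tok)
	fmt.Println(terminalTexts(ctx)) // [hi]
}
```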
+
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+ if prc.children == nil || i < 0 || i >= len(prc.children) {
+ return nil
+ }
+
+ j := -1 // what element have we found with ctxType?
+ for _, o := range prc.children {
+
+ childType := reflect.TypeOf(o)
+
+ if childType.Implements(ctxType) {
+ j++
+ if j == i {
+ return o.(RuleContext)
+ }
+ }
+ }
+ return nil
+}
+
+// This runtime predates Go generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+ return prc.getChild(ctxType, i)
+}
+
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+ if prc.children == nil {
+ return make([]RuleContext, 0)
+ }
+
+ contexts := make([]RuleContext, 0)
+
+ for _, child := range prc.children {
+ childType := reflect.TypeOf(child)
+
+ if childType.ConvertibleTo(ctxType) {
+ contexts = append(contexts, child.(RuleContext))
+ }
+ }
+ return contexts
+}
+
+func (prc *BaseParserRuleContext) GetChildCount() int {
+ if prc.children == nil {
+ return 0
+ }
+
+ return len(prc.children)
+}
+
+func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
+ if prc.start == nil || prc.stop == nil {
+ return TreeInvalidInterval
+ }
+
+ return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+// need to manage circular dependencies, so export now
+
+// String prints out a whole tree, not just a node, in LISP format
+// (root child1 .. childN). Prints just a node if this context is a leaf.
+//
+
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+ var p ParserRuleContext = prc
+ s := "["
+ for p != nil && p != stop {
+ if ruleNames == nil {
+ if !p.IsEmpty() {
+ s += strconv.Itoa(p.GetInvokingState())
+ }
+ } else {
+ ri := p.GetRuleIndex()
+ var ruleName string
+ if ri >= 0 && ri < len(ruleNames) {
+ ruleName = ruleNames[ri]
+ } else {
+ ruleName = strconv.Itoa(ri)
+ }
+ s += ruleName
+ }
+ if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+ s += " "
+ }
+ pi := p.GetParent()
+ if pi != nil {
+ p = pi.(ParserRuleContext)
+ } else {
+ p = nil
+ }
+ }
+ s += "]"
+ return s
+}
+
+func (prc *BaseParserRuleContext) SetParent(v Tree) {
+ if v == nil {
+ prc.parentCtx = nil
+ } else {
+ prc.parentCtx = v.(RuleContext)
+ }
+}
+
+func (prc *BaseParserRuleContext) GetInvokingState() int {
+ return prc.invokingState
+}
+
+func (prc *BaseParserRuleContext) SetInvokingState(t int) {
+ prc.invokingState = t
+}
+
+func (prc *BaseParserRuleContext) GetRuleIndex() int {
+ return prc.RuleIndex
+}
+
+func (prc *BaseParserRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
+
+// IsEmpty returns true if this context is empty.
+//
+// A context is empty if there is no invoking state, meaning nothing invoked
+// the current context.
+func (prc *BaseParserRuleContext) IsEmpty() bool {
+ return prc.invokingState == -1
+}
+
+// GetParent returns the parent context of this rule context, or nil if this
+// context has no parent (i.e. it is the root of the parse tree).
+func (prc *BaseParserRuleContext) GetParent() Tree {
+ return prc.parentCtx
+}
+
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+ ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+ *BaseParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+ prc := new(BaseInterpreterRuleContext)
+
+ prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = ruleIndex
+
+ return prc
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
new file mode 100644
index 0000000000..c1b80cc1f0
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
@@ -0,0 +1,727 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+const (
+	// BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, where
+	// $ doesn't mean wildcard:
+ //
+ // $ + x = [$,x]
+ //
+ // Here,
+ //
+ // $ = EmptyReturnState
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
+//
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+const (
+ PredictionContextEmpty = iota
+ PredictionContextSingleton
+ PredictionContextArray
+)
+
+// PredictionContext is a Go-idiomatic implementation of PredictionContext that does not try to
+// emulate inheritance from Java, and can be used without an interface definition. An interface
+// is not required because no user code will ever need to implement this interface.
+type PredictionContext struct {
+ cachedHash int
+ pcType int
+ parentCtx *PredictionContext
+ returnState int
+ parents []*PredictionContext
+ returnStates []int
+}
+
+func NewEmptyPredictionContext() *PredictionContext {
+ nep := &PredictionContext{}
+ nep.cachedHash = calculateEmptyHash()
+ nep.pcType = PredictionContextEmpty
+ nep.returnState = BasePredictionContextEmptyReturnState
+ return nep
+}
+
+func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
+ pc := &PredictionContext{}
+ pc.pcType = PredictionContextSingleton
+ pc.returnState = returnState
+ pc.parentCtx = parent
+ if parent != nil {
+ pc.cachedHash = calculateHash(parent, returnState)
+ } else {
+ pc.cachedHash = calculateEmptyHash()
+ }
+ return pc
+}
+
+func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
+	// Parent can be nil only in full ctx mode when we make an array
+ // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
+ // nil parent and
+ // returnState == {@link //EmptyReturnState}.
+ hash := murmurInit(1)
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ nec := &PredictionContext{}
+ nec.cachedHash = hash
+ nec.pcType = PredictionContextArray
+ nec.parents = parents
+ nec.returnStates = returnStates
+ return nec
+}
+
+func (p *PredictionContext) Hash() int {
+ return p.cachedHash
+}
+
+func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+		otherP, ok := other.(*PredictionContext)
+		return other == nil || (ok && (otherP == nil || otherP.isEmpty()))
+ case PredictionContextSingleton:
+ return p.SingletonEquals(other)
+ case PredictionContextArray:
+ return p.ArrayEquals(other)
+ }
+ return false
+}
+
+func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
+	if o == nil {
+		return false
+	}
+	// Comma-ok keeps a foreign Collectable implementation from panicking the
+	// type assertion.
+	other, ok := o.(*PredictionContext)
+	if !ok || other == nil || other.pcType != PredictionContextArray {
+		return false
+	}
+ if p.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(p.returnStates, other.returnStates) &&
+ slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
+	if other == nil {
+		return false
+	}
+	otherP, ok := other.(*PredictionContext)
+	if !ok || otherP == nil {
+		return false
+	}
+
+ if p.cachedHash != otherP.Hash() {
+ return false // Can't be same if hash is different
+ }
+
+ if p.returnState != otherP.getReturnState(0) {
+ return false
+ }
+
+ // Both parents must be nil if one is
+ if p.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return p.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (p *PredictionContext) GetParent(i int) *PredictionContext {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return nil
+ case PredictionContextSingleton:
+ return p.parentCtx
+ case PredictionContextArray:
+ return p.parents[i]
+ }
+ return nil
+}
+
+func (p *PredictionContext) getReturnState(i int) int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates[i]
+ default:
+ return p.returnState
+ }
+}
+
+func (p *PredictionContext) GetReturnStates() []int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates
+ default:
+ return []int{p.returnState}
+ }
+}
+
+func (p *PredictionContext) length() int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return len(p.returnStates)
+ default:
+ return 1
+ }
+}
+
+func (p *PredictionContext) hasEmptyPath() bool {
+ switch p.pcType {
+ case PredictionContextSingleton:
+ return p.returnState == BasePredictionContextEmptyReturnState
+ }
+ return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (p *PredictionContext) String() string {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return "$"
+ case PredictionContextSingleton:
+ var up string
+
+ if p.parentCtx == nil {
+ up = ""
+ } else {
+ up = p.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if p.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(p.returnState)
+ }
+
+ return strconv.Itoa(p.returnState) + " " + up
+ case PredictionContextArray:
+ if p.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(p.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if p.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(p.returnStates[i])
+ if !p.parents[i].isEmpty() {
+ s = s + " " + p.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+ return s + "]"
+
+ default:
+ return "unknown"
+ }
+}
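+
+// exampleContextString is an illustrative sketch, not part of the upstream
+// runtime: a chain of singleton contexts prints return states from the top
+// of the stack outward, ending in $ for the empty root.
+func exampleContextString() string {
+	inner := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 2)
+	outer := SingletonBasePredictionContextCreate(inner, 5)
+	return outer.String() // "5 2 $"
+}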
+
+func (p *PredictionContext) isEmpty() bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return true
+ case PredictionContextArray:
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return p.returnStates[0] == BasePredictionContextEmptyReturnState
+ default:
+ return false
+ }
+}
+
+func (p *PredictionContext) Type() int {
+ return p.pcType
+}
+
+func calculateHash(parent *PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+// predictionContextFromRuleContext converts a [RuleContext] tree to a
+// [PredictionContext] graph, returning [BasePredictionContextEMPTY] if
+// outerContext is empty or nil.
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
+
+func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+ if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
+ return mergeSingletons(a, b, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as wildcard
+ if rootIsWildcard {
+ if a.isEmpty() {
+ return a
+ }
+ if b.isEmpty() {
+ return b
+ }
+ }
+
+ // Convert either Singleton or Empty to arrays, so that we can merge them
+ //
+ ara := convertToArray(a)
+ arb := convertToArray(b)
+ return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
+}
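+
+// exampleMerge is an illustrative sketch, not part of the upstream runtime,
+// of the ax + ay -> a'[x,y] case: two singletons that share a parent but
+// differ in return state merge into one array context holding both return
+// states under the shared parent.
+func exampleMerge() *PredictionContext {
+	parent := NewEmptyPredictionContext()
+	ax := SingletonBasePredictionContextCreate(parent, 10)
+	ay := SingletonBasePredictionContextCreate(parent, 20)
+	// rootIsWildcard=true selects local-context (SLL) merge rules; a nil
+	// mergeCache simply skips memoization.
+	return merge(ax, ay, true, nil)
+}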
+
+func convertToArray(pc *PredictionContext) *PredictionContext {
+ switch pc.Type() {
+ case PredictionContextEmpty:
+ return NewArrayPredictionContext([]*PredictionContext{}, []int{})
+ case PredictionContextSingleton:
+ return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
+ default:
+ // Already an array
+ }
+ return pc
+}
+
+// mergeSingletons merges two Singleton [PredictionContext] instances.
+//
+// Stack tops equal, parents merge is same: return left graph.
+//
+// Same stack top, parents differ: merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent: make array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents: make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
+// The parameter a is the first Singleton [PredictionContext] and b the
+// second. rootIsWildcard is true for a local-context merge and false for a
+// full-context merge; mergeCache, if non-nil, memoizes previous merges.
+func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ return previous
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent.Equals(a.parentCtx) {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent.Equals(b.parentCtx) {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array.
+ // New joined parent so create a new singleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent *PredictionContext
+	if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) {
+		// ax + bx = [a,b]x
+		singleParent = a.parentCtx
+	}
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []*PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []*PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []*PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+}
+
+// mergeRoot handles the case where at least one of a or b is
+// [BasePredictionContextEMPTY]. In the following, the symbol $ is used to
+// represent [BasePredictionContextEMPTY].
+//
+// # Local-Context Merges
+//
+// These local-context merge operations are used when rootIsWildcard is true.
+//
+// $ is a superset of any graph: return $.
+//
+// $ and anything is $, so the merged parent is $: return left graph.
+//
+// Special case of last merge if local context.
+//
+// # Full-Context Merges
+//
+// These full-context merge operations are used when rootIsWildcard is false.
+//
+// Must keep all contexts; $ in an array is a special value (with a nil
+// parent).
+//
+// The parameter a is the first Singleton [PredictionContext] and b the
+// second. rootIsWildcard is true for a local-context merge and false for a
+// full-context merge.
+func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
+ if rootIsWildcard {
+		if a.pcType == PredictionContextEmpty {
+			return BasePredictionContextEMPTY // * + b = *, where * is the $ wildcard
+		}
+		if b.pcType == PredictionContextEmpty {
+			return BasePredictionContextEMPTY // a + * = *
+		}
+ } else {
+ if a.isEmpty() && b.isEmpty() {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a.isEmpty() { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
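+
+// exampleMergeRootWildcard is an illustrative sketch, not part of the
+// upstream runtime: with rootIsWildcard=true, $ acts as a wildcard and
+// absorbs the other graph, so merging $ with any context yields $.
+func exampleMergeRootWildcard(x *PredictionContext) *PredictionContext {
+	return mergeRoot(BasePredictionContextEMPTY, x, true) // always $
+}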
+
+// mergeArrays merges two Array [PredictionContext] instances:
+//
+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to a Singleton
+// [PredictionContext].
+//
+//goland:noinspection GoBoolExpressions
+func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax
+ // ->
+ // ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.Put(a, b, pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
+ if M.Equals(a) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, a)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M.Equals(b) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, b)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(&mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.Put(a, b, M)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
+
+// combineCommonParents makes a pass over all M parents and merges any that
+// are Equals() to one another. Note that we pass a pointer to the slice, as
+// we want to modify it in place.
+//
+//goland:noinspection GoUnusedFunction
+func combineCommonParents(parents *[]*PredictionContext) {
+ uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
+
+ for p := 0; p < len(*parents); p++ {
+ parent := (*parents)[p]
+ _, _ = uniqueParents.Put(parent)
+ }
+ for q := 0; q < len(*parents); q++ {
+ pc, _ := uniqueParents.Get((*parents)[q])
+ (*parents)[q] = pc
+ }
+}
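+
+// exampleCombineCommonParents is an illustrative sketch, not part of the
+// upstream runtime, assuming JStore's Put/Get deduplicate by Equals(): two
+// Equals()-equal parents in the slice collapse to one shared instance.
+func exampleCombineCommonParents() bool {
+	p1 := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 3)
+	p2 := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 3)
+	parents := []*PredictionContext{p1, p2}
+	combineCommonParents(&parents)
+	return parents[0] == parents[1] // true after deduplication
+}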
+
+func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
+ if context.isEmpty() {
+ return context
+ }
+ existing, present := visited.Get(context)
+ if present {
+ return existing
+ }
+
+ existing, present = contextCache.Get(context)
+ if present {
+ visited.Put(context, existing)
+ return existing
+ }
+ changed := false
+ parents := make([]*PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || !parent.Equals(context.GetParent(i)) {
+ if !changed {
+ parents = make([]*PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited.Put(context, context)
+ return context
+ }
+ var updated *PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited.Put(updated, updated)
+ visited.Put(context, updated)
+
+ return updated
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
new file mode 100644
index 0000000000..25dfb11e8f
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
@@ -0,0 +1,48 @@
+package antlr
+
+var BasePredictionContextEMPTY = &PredictionContext{
+ cachedHash: calculateEmptyHash(),
+ pcType: PredictionContextEmpty,
+ returnState: BasePredictionContextEmptyReturnState,
+}
+
+// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+type PredictionContextCache struct {
+ cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ return &PredictionContextCache{
+ cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
+ }
+}
+
+// add a context to the cache and return it. If an equal context already
+// exists, return that one instead and do not add a new context to the cache.
+func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
+ if ctx.isEmpty() {
+ return BasePredictionContextEMPTY
+ }
+
+ // Put will return the existing entry if it is present (note this is done via Equals, not whether it is
+ // the same pointer), otherwise it will add the new entry and return that.
+ //
+ existing, present := p.cache.Get(ctx)
+ if present {
+ return existing
+ }
+ p.cache.Put(ctx, ctx)
+ return ctx
+}
+
+func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
+ pc, exists := p.cache.Get(ctx)
+ return pc, exists
+}
+
+func (p *PredictionContextCache) length() int {
+ return p.cache.Len()
+}
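+
+// exampleCacheCanonicalization is an illustrative sketch, not part of the
+// upstream runtime: adding two distinct but Equals()-equal contexts returns
+// the first stored instance both times, so callers end up sharing one graph.
+func exampleCacheCanonicalization() bool {
+	cache := NewPredictionContextCache()
+	a := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
+	b := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
+	return cache.add(a) == cache.add(b) // true: b canonicalizes to a
+}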
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
new file mode 100644
index 0000000000..3f85a6a520
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
@@ -0,0 +1,536 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ // PredictionModeSLL represents the SLL(*) prediction mode.
+ // This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the SLL prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful LL prediction abilities to complete successfully.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+
+ // PredictionModeLL represents the LL(*) prediction mode.
+ // This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+
+ // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
+ // with exact ambiguity detection.
+ //
+ // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
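+
+// As a sketch of typical application-side (not runtime) usage, assuming a
+// generated parser p whose Interpreter field is a *ParserATNSimulator:
+//
+//	p.Interpreter.SetPredictionMode(antlr.PredictionModeSLL)
+//
+// with a fallback that re-parses under PredictionModeLL if SLL reports a
+// syntax error.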
+
+// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases:
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+// # Combined SLL+LL Parsing
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative, e.g.
+//
+// {1,2} and {3,4}
+//
+// If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// # Heuristic
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ';', it has a
+// [DFA] state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]]
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node because alternative two has another way to continue,
+// via
+//
+//	[6|2|[]]
+//
+// It also lets us continue for this rule:
+//
+// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state immediately before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// # Pure SLL Parsing
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// # Predicates in SLL+LL Parsing
+//
+// SLL decisions don't evaluate predicates until after they reach [DFA] stop
+// states because they need to create the [DFA] cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation, so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, [ATNConfigSet] combines stack contexts but not
+// semantic predicate contexts, so we might see two configurations like the
+// following:
+//
+// (s, 1, x, {}), (s, 1, x', {p})
+//
+// Before testing these configurations against others, we have to merge
+// x and x' (without modifying the existing configurations).
+// For example, we test (x+x')==x'' when looking for conflicts in
+// the following configurations:
+//
+//	(s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})
+//
+// If the configuration set has predicates (as indicated by
+// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
+// the configurations to strip out all the predicates so that a standard
+// [ATNConfigSet] will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
+
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input, so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+		// Don't bother with combining configs from different semantic
+		// contexts if we can fail over to full LL; it costs more time
+		// since we'll often fail over anyway.
+ if configs.hasSemanticContext {
+ // dup configs, tossing out semantic predicates
+ dup := NewATNConfigSet(false)
+ for _, c := range configs.configs {
+
+ // NewATNConfig({semanticContext:}, c)
+ c = NewATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar predicates
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
+func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if all configurations in configs are in a
+// [RuleStopState]
+func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
+
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
+//
+// Can we stop looking ahead during [ATN] simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations 'C', into
+// conflicting subsets (s, _, ctx, _) and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical [ATNConfig].state and [ATNConfig].context values
+// but a different [ATNConfig].alt value, e.g.
+//
+// (s, i, ctx, _)
+//
+// and
+//
+// (s, j, ctx, _) ; for i != j
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// A_s,ctx = {i | (s, i, ctx, _)}
+//
+// for each configuration in C holding s and ctx fixed.
+//
+// Or in pseudo-code:
+//
+// for each configuration c in C:
+//	  map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+//
+// The values in map are the set of
+//
+// A_s,ctx
+//
+// sets.
+//
+// If
+//
+// |A_s,ctx| = 1
+//
+// then there is no conflict associated with s and ctx.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// further lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look all the way to the end of the file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// # Conflicting Configs
+//
+// Two configurations:
+//
+// (s, i, x) and (s, j, x')
+//
+// conflict when i != j but x = x'. Because we merge all
+// (s, i, _) configurations together, that means that there are at
+// most n configurations associated with state s for
+// n possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts x and x'.
+//
+// Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either x or x'.
+// If the x associated with lowest alternative i
+// is the superset, then i is the only possible prediction since the
+// others resolve to min(i) as well. However, if x is
+// associated with j > i then at least one stack configuration for
+// j is not in conflict with alternative i. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between x and
+// x', which lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// # Continue/Stop Rule
+//
+// Continue if the union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives,
+//
+// [i for (_, i, _)]
+//
+// tells us which alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// # Cases
+//
+// - no conflicts and more than 1 alternative in set => continue
+// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
+// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
+//   - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s'', 1, z) yields non-conflicting set
+// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
+// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
+// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
+// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
+// {1} ∪ {2} = {1,2} => continue
+// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
+// {1} ∪ {3} = {1,3} => continue
+//
+// # Exact Ambiguity Detection
+//
+// If all states report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set:
+//
+// |A_i| > 1
+//
+// and
+//
+// A_i = A_j ; for all i, j
+//
+// In other words, we continue examining lookahead until all A_i
+// have more than one alternative and all A_i are the same. If
+//
+// A={{1,2}, {1,3}}
+//
+// then regular LL prediction would terminate because the resolved set is {1}.
+// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three, so we keep going. When exact ambiguity detection
+// is needed, we can stop prediction only when the sets look like:
+//
+// A={{1,2}}
+//
+// or
+//
+// {{1,2},{1,2}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
+// than one alternative.
+//
+// The func returns true if every [BitSet] in altsets has
+// cardinality > 1.
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
+// exactly one alternative.
+//
+// The func returns true if altsets contains at least one [BitSet] with
+// cardinality 1.
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
+// more than one alternative.
+//
+// The func returns true if altsets contains a [BitSet] with
+// cardinality > 1, otherwise false.
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
+//
+// The func returns true if every member of altsets is equal to the others.
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+		if first == nil {
+			first = alts
+		} else if alts != first {
+			// Note: this is a pointer comparison, as in the upstream runtime;
+			// distinct BitSet instances with equal contents compare as different.
+			return false
+		}
+ }
+
+ return true
+}
+
+// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
+// altsets. If no such alternative exists, this method returns
+// [ATNInvalidAltNumber].
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
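+
+// exampleUniqueAlt is an illustrative sketch, not part of the upstream
+// runtime, assuming the runtime's BitSet behaves as a standard bit set:
+// the subsets {1} and {1} have union {1}, so the unique alternative is 1;
+// adding a subset containing 2 would instead yield ATNInvalidAltNumber.
+func exampleUniqueAlt() int {
+	s1 := NewBitSet()
+	s1.add(1)
+	s2 := NewBitSet()
+	s2.add(1)
+	return PredictionModegetUniqueAlt([]*BitSet{s1, s2}) // 1
+}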
+
+// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each [BitSet]
+// in altsets, being the set of represented alternatives in altsets.
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+//
+// for each configuration c in configs:
+// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")
+
+ for _, c := range configs.configs {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
+//
+// for each configuration c in configs:
+// map[c.ATNConfig.state] U= c.ATNConfig.alt}
+func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.configs {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
+// if there is one.
+//
+// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
new file mode 100644
index 0000000000..2e0b504fb3
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+ HasError() bool
+ GetError() RecognitionException
+ SetError(RecognitionException)
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+ SynErr RecognitionException
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var tokenTypeMapCache = make(map[string]int)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.12.0"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) SetError(err RecognitionException) {
+ b.SynErr = err
+}
+
+func (b *BaseRecognizer) HasError() bool {
+ return b.SynErr != nil
+}
+
+func (b *BaseRecognizer) GetError() RecognitionException {
+ return b.SynErr
+}
+
+func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// GetRuleIndexMap Get a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+// GetTokenType get the token type based upon its name
+func (b *BaseRecognizer) GetTokenType(_ string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// GetErrorHeader returns the error header, normally line/character position information.
+//
+// Can be overridden in sub structs embedding BaseRecognizer.
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// GetTokenErrorDisplay shows how a token should be displayed in an error message.
+//
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+//
+// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of [ANTLRErrorStrategy] may provide a similar
+// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+		} else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// Sempred embedding structs need to override this if there are sempreds or actions
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
+ return true
+}
+
+// Precpred embedding structs need to override this if there are preceding predicates
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
+ return true
+}
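+
+// hypotheticalRecognizer is an illustrative sketch, not part of the upstream
+// runtime: generated recognizers embed BaseRecognizer and override Sempred
+// (and Precpred) to dispatch on rule and predicate indexes, instead of the
+// default implementation above that always returns true.
+type hypotheticalRecognizer struct {
+	*BaseRecognizer
+}
+
+func (h *hypotheticalRecognizer) Sempred(_ RuleContext, ruleIndex, predIndex int) bool {
+	// A generated parser switches on ruleIndex/predIndex here; this stub
+	// merely illustrates the override point.
+	return ruleIndex >= 0 && predIndex >= 0
+}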
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
new file mode 100644
index 0000000000..f2ad04793e
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// RuleContext is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack.
+//
+// We actually carry no information about the rule associated with this context (except
+// when parsing). We keep only the state number of the invoking state from
+// the [ATN] submachine that invoked this. Contrast this with the s
+// pointer inside [ParserRuleContext] that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the struct
+// [ParserRuleContext], which embeds a RuleContext.
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
new file mode 100644
index 0000000000..68cb9061eb
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
@@ -0,0 +1,464 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// SemanticContext is a tree structure used to record the semantic context in
+// which an ATN configuration is valid. It's either a single predicate, a
+// conjunction p1 && p2, or a sum of products p1 || p2.
+//
+// I have scoped the AND, OR, and Predicate subclasses of
+// [SemanticContext] within the scope of this outer "class".
+type SemanticContext interface {
+ Equals(other Collectable[SemanticContext]) bool
+ Hash() int
+
+ evaluate(parser Recognizer, outerContext RuleContext) bool
+ evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
+
+ String() string
+}
+
+func SemanticContextandContext(a, b SemanticContext) SemanticContext {
+ if a == nil || a == SemanticContextNone {
+ return b
+ }
+ if b == nil || b == SemanticContextNone {
+ return a
+ }
+ result := NewAND(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+func SemanticContextorContext(a, b SemanticContext) SemanticContext {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ if a == SemanticContextNone || b == SemanticContextNone {
+ return SemanticContextNone
+ }
+ result := NewOR(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
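+
+// exampleCombine is an illustrative sketch, not part of the upstream runtime:
+// SemanticContextNone acts as the identity for AND (the other operand is
+// returned unchanged) and as the annihilator for OR (the result is
+// SemanticContextNone itself).
+func exampleCombine() (SemanticContext, SemanticContext) {
+	p := NewPredicate(1, 0, false)
+	and := SemanticContextandContext(p, SemanticContextNone) // p
+	or := SemanticContextorContext(p, SemanticContextNone)   // SemanticContextNone
+	return and, or
+}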
+
+type Predicate struct {
+ ruleIndex int
+ predIndex int
+ isCtxDependent bool
+}
+
+func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
+ p := new(Predicate)
+
+ p.ruleIndex = ruleIndex
+ p.predIndex = predIndex
+ p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ return p
+}
+
+// SemanticContextNone is the default [SemanticContext], which is semantically
+// equivalent to a predicate of the form {true}?.
+var SemanticContextNone = NewPredicate(-1, -1, false)
+
+func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
+ return p
+}
+
+func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+
+ var localctx RuleContext
+
+ if p.isCtxDependent {
+ localctx = outerContext
+ }
+
+ return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
+}
+
+func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*Predicate); !ok {
+ return false
+ } else {
+ return p.ruleIndex == other.(*Predicate).ruleIndex &&
+ p.predIndex == other.(*Predicate).predIndex &&
+ p.isCtxDependent == other.(*Predicate).isCtxDependent
+ }
+}
+
+func (p *Predicate) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, p.ruleIndex)
+ h = murmurUpdate(h, p.predIndex)
+ if p.isCtxDependent {
+ h = murmurUpdate(h, 1)
+ } else {
+ h = murmurUpdate(h, 0)
+ }
+ return murmurFinish(h, 3)
+}
+
+func (p *Predicate) String() string {
+ return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
+}
+
+type PrecedencePredicate struct {
+ precedence int
+}
+
+func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
+
+ p := new(PrecedencePredicate)
+ p.precedence = precedence
+
+ return p
+}
+
+func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ return parser.Precpred(outerContext, p.precedence)
+}
+
+func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ if parser.Precpred(outerContext, p.precedence) {
+ return SemanticContextNone
+ }
+
+ return nil
+}
+
+func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
+ return p.precedence - other.precedence
+}
+
+func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {
+
+ var op *PrecedencePredicate
+ var ok bool
+ if op, ok = other.(*PrecedencePredicate); !ok {
+ return false
+ }
+
+ if p == op {
+ return true
+ }
+
+ return p.precedence == other.(*PrecedencePredicate).precedence
+}
+
+func (p *PrecedencePredicate) Hash() int {
+ h := uint32(1)
+ h = 31*h + uint32(p.precedence)
+ return int(h)
+}
+
+func (p *PrecedencePredicate) String() string {
+ return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
+}
+
+func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
+ result := make([]*PrecedencePredicate, 0)
+
+ set.Each(func(v SemanticContext) bool {
+ if c2, ok := v.(*PrecedencePredicate); ok {
+ result = append(result, c2)
+ }
+ return true
+ })
+
+ return result
+}
+
+// AND is a semantic context which is true whenever none of the contained
+// contexts is false.
+type AND struct {
+ opnds []SemanticContext
+}
+
+func NewAND(a, b SemanticContext) *AND {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
+ if aa, ok := a.(*AND); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*AND); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence < reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+ opnds := make([]SemanticContext, len(vs))
+ copy(opnds, vs)
+
+ and := new(AND)
+ and.opnds = opnds
+
+ return and
+}
+
+func (a *AND) Equals(other Collectable[SemanticContext]) bool {
+	if a == other {
+		return true
+	}
+	if oa, ok := other.(*AND); !ok {
+		return false
+	} else {
+		// Guard the index below: operand lists of different lengths cannot
+		// be equal.
+		if len(a.opnds) != len(oa.opnds) {
+			return false
+		}
+		for i, v := range oa.opnds {
+			if !a.opnds[i].Equals(v) {
+				return false
+			}
+		}
+		return true
+	}
+}
+
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(a.opnds); i++ {
+ if !a.opnds[i].evaluate(parser, outerContext) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+
+ for i := 0; i < len(a.opnds); i++ {
+ context := a.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == nil {
+ // The AND context is false if any element is false
+ return nil
+ } else if evaluated != SemanticContextNone {
+ // Reduce the result by Skipping true elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return a
+ }
+
+ if len(operands) == 0 {
+ // all elements were true, so the AND context is true
+ return SemanticContextNone
+ }
+
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextandContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (a *AND) Hash() int {
+ h := murmurInit(37) // Init with a value different from OR
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (o *OR) Hash() int {
+	h := murmurInit(41) // Init with a value different from AND
+ for _, op := range o.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(o.opnds))
+}
+
+func (a *AND) String() string {
+ s := ""
+
+ for _, o := range a.opnds {
+ s += "&& " + fmt.Sprint(o)
+ }
+
+	if len(s) > 3 {
+		return s[3:] // strip the leading "&& "
+ }
+
+ return s
+}
+
+// OR is a semantic context which is true whenever at least one of the
+// contained contexts is true.
+
+type OR struct {
+ opnds []SemanticContext
+}
+
+func NewOR(a, b SemanticContext) *OR {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
+ if aa, ok := a.(*OR); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*OR); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+		// interested in the transition with the highest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence > reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+
+ opnds := make([]SemanticContext, len(vs))
+ copy(opnds, vs)
+
+ o := new(OR)
+ o.opnds = opnds
+
+ return o
+}
+
+func (o *OR) Equals(other Collectable[SemanticContext]) bool {
+ if o == other {
+ return true
+	}
+	rhs, ok := other.(*OR)
+	if !ok {
+		return false
+	}
+	if len(o.opnds) != len(rhs.opnds) {
+		return false
+	}
+	for i, v := range rhs.opnds {
+		if !o.opnds[i].Equals(v) {
+			return false
+		}
+	}
+	return true
+}
+
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(o.opnds); i++ {
+ if o.opnds[i].evaluate(parser, outerContext) {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+ for i := 0; i < len(o.opnds); i++ {
+ context := o.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == SemanticContextNone {
+ // The OR context is true if any element is true
+ return SemanticContextNone
+ } else if evaluated != nil {
+			// Reduce the result by skipping false elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return o
+ }
+ if len(operands) == 0 {
+ // all elements were false, so the OR context is false
+ return nil
+ }
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextorContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (o *OR) String() string {
+ s := ""
+
+ for _, o := range o.opnds {
+ s += "|| " + fmt.Sprint(o)
+ }
+
+	if len(s) > 3 {
+		return s[3:] // strip the leading "|| "
+ }
+
+ return s
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go
new file mode 100644
index 0000000000..70c0673a0f
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go
@@ -0,0 +1,281 @@
+//go:build antlr.stats
+
+package antlr
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default
+// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag.
+//
+
+// collectStats tells various components to collect statistics. Because it is a
+// constant that is true only when this file is included in the build, the
+// compiler can completely eliminate all the code that is used only when
+// collecting statistics.
+const collectStats = true
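+
+// Call sites inside the runtime guard statistics work on this constant, so a
+// build without the tag compiles the guarded code away entirely. A sketch of
+// the pattern:
+//
+//	if collectStats {
+//		Statistics.AddJStatRec(rec)
+//	}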
+
+// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
+// It is exposed via the exported [Statistics] variable so that it can be used to look for things that are not
+// already looked for in the runtime statistics.
+type goRunStats struct {
+
+	// jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection
+	// created during a run. It can be used to look for things that are not already looked for within this package.
+	//
+ jStats []*JStatRec
+ jStatsLock sync.RWMutex
+ topN int
+ topNByMax []*JStatRec
+ topNByUsed []*JStatRec
+ unusedCollections map[CollectionSource]int
+ counts map[CollectionSource]int
+}
+
+const (
+ collectionsFile = "collections"
+)
+
+var (
+ Statistics = &goRunStats{
+ topN: 10,
+ }
+)
+
+type statsOption func(*goRunStats) error
+
+// Configure allows the statistics system to be configured as the user wants and override the defaults
+func (s *goRunStats) Configure(options ...statsOption) error {
+ for _, option := range options {
+ err := option(s)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
+//
+// For example, if you want to see the top 20 collections by size, you can do:
+//
+// antlr.Statistics.Configure(antlr.WithTopN(20))
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ s.topN = topN
+ return nil
+ }
+}
+
+// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
+//
+// The function gathers and analyzes a number of statistics about any particular run of
+// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
+// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
+// especially useful in tracking down bugs or performance problems when an ANTLR user could
+// supply the output from this package, but cannot supply the grammar file(s) they are using, even
+// privately to the maintainers.
+//
+// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
+// must call this function themselves to analyze the statistics. This is because none of the infrastructure is
+// present unless the calling program is built with the antlr.stats tag, like so:
+//
+// go build -tags antlr.stats .
+//
+// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
+// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
+// [Statistics.Report] function to report the statistics.
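+//
+// For example (a sketch; it assumes ./stats already exists as a directory):
+//
+//	antlr.Statistics.Analyze()
+//	if err := antlr.Statistics.Report("./stats", "run1"); err != nil {
+//		log.Fatal(err)
+//	}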
+//
+// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send them to
+// me [Jim Idle] directly at jimi@idle.ws
+//
+// [Jim Idle]: https://github.com/jim-idle
+func (s *goRunStats) Analyze() {
+
+ // Look for anything that looks strange and record it in our local maps etc for the report to present it
+ //
+ s.CollectionAnomalies()
+ s.TopNCollections()
+}
+
+// TopNCollections looks through all the statistical records and gathers the top N collections by maximum size and by usage.
+func (s *goRunStats) TopNCollections() {
+
+ // Let's sort the stat records by MaxSize
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].MaxSize > s.jStats[j].MaxSize
+ })
+
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByMax = append(s.topNByMax, s.jStats[i])
+ }
+
+ // Sort by the number of times used
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
+ })
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByUsed = append(s.topNByUsed, s.jStats[i])
+ }
+}
+
+// Report dumps an AsciiDoc formatted report of all the statistics collected during a run to the given dir output
+// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
+// given a type name such as `collections` and a .adoc suffix.
+func (s *goRunStats) Report(dir string, prefix string) error {
+
+ isDir, err := isDirectory(dir)
+ switch {
+ case err != nil:
+ return err
+ case !isDir:
+ return fmt.Errorf("output directory `%s` is not a directory", dir)
+ }
+ s.reportCollections(dir, prefix)
+
+ // Clean out any old data in case the user forgets
+ //
+ s.Reset()
+ return nil
+}
+
+func (s *goRunStats) Reset() {
+ s.jStats = nil
+ s.topNByUsed = nil
+ s.topNByMax = nil
+}
+
+func (s *goRunStats) reportCollections(dir, prefix string) {
+ cname := filepath.Join(dir, ".asciidoctor")
+ // If the file doesn't exist, create it, or append to the file
+ f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, _ = f.WriteString(`// .asciidoctorconfig
+++++
+
+++++`)
+ _ = f.Close()
+
+ fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
+ // If the file doesn't exist, create it, or append to the file
+ f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func(f *os.File) {
+ err := f.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+ }(f)
+ _, _ = f.WriteString("= Collections for " + prefix + "\n\n")
+
+ _, _ = f.WriteString("== Summary\n")
+
+ if s.unusedCollections != nil {
+ _, _ = f.WriteString("=== Unused Collections\n")
+		_, _ = f.WriteString("Unused collections incur an allocation penalty that makes them a candidate for either\n")
+		_, _ = f.WriteString(" removal or optimization. If a collection is allocated but never used, you should\n")
+		_, _ = f.WriteString(" consider removing the allocation. If a collection is used, but only rarely,\n")
+		_, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n")
+		_, _ = f.WriteString(" actually needed.\n\n")
+
+ _, _ = f.WriteString("\n.Unused collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+
+ for k, v := range s.unusedCollections {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+		_, _ = f.WriteString("|===\n\n")
+ }
+
+ _, _ = f.WriteString("\n.Summary of Collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+ for k, v := range s.counts {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+ _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
+ for _, c := range s.topNByMax {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
+ for _, c := range s.topNByUsed {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+}
+
+// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when the antlr.stats build tag is enabled.
+func (s *goRunStats) AddJStatRec(rec *JStatRec) {
+ s.jStatsLock.Lock()
+ defer s.jStatsLock.Unlock()
+ s.jStats = append(s.jStats, rec)
+}
+
+// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
+func (s *goRunStats) CollectionAnomalies() {
+ s.jStatsLock.RLock()
+ defer s.jStatsLock.RUnlock()
+ s.counts = make(map[CollectionSource]int, len(s.jStats))
+ for _, c := range s.jStats {
+
+		// Accumulate raw counts
+ //
+ s.counts[c.Source]++
+
+ // Look for allocated but unused collections and count them
+ if c.MaxSize == 0 && c.Puts == 0 {
+ if s.unusedCollections == nil {
+ s.unusedCollections = make(map[CollectionSource]int)
+ }
+ s.unusedCollections[c.Source]++
+ }
+ if c.MaxSize > 6000 {
+ fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
+ }
+ }
+
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
new file mode 100644
index 0000000000..4d9eb94e5f
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
@@ -0,0 +1,23 @@
+package antlr
+
+// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be
+// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies
+// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function
+// for ideas on what can be gleaned from these statistics about collections.
+type JStatRec struct {
+ Source CollectionSource
+ MaxSize int
+ CurSize int
+ Gets int
+ GetHits int
+ GetMisses int
+ GetHashConflicts int
+ GetNoEnt int
+ Puts int
+ PutHits int
+ PutMisses int
+ PutHashConflicts int
+ MaxSlotSize int
+ Description string
+ CreateStack []byte
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token.go
new file mode 100644
index 0000000000..9670efb829
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
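+//
+// For example (a sketch; tok is any value satisfying the Token interface):
+//
+//	fmt.Printf("%d:%d %q type=%d\n",
+//		tok.GetLine(), tok.GetColumn(), tok.GetText(), tok.GetTokenType())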
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+
+ String() string
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+	start      int // optional; return -1 if not implemented.
+	stop       int // optional; return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
+ //
+ // All tokens go to the parser (unless [Skip] is called in the lexer rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+ TokenDefaultChannel = 0
+
+	// TokenHiddenChannel defines the normal hidden channel - the parser will not see tokens that are not on [TokenDefaultChannel].
+ //
+ // Anything on a different channel than TokenDefaultChannel is not parsed by parser.
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := &CommonToken{
+ BaseToken: BaseToken{
+ source: source,
+ tokenType: tokenType,
+ channel: channel,
+ start: start,
+ stop: stop,
+ tokenIndex: -1,
+ },
+ }
+
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
+
+
+// clone constructs a new [CommonToken] as a copy of this one. The new token
+// shares the source pair with the original and copies its text, token index,
+// line, and column information.
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+	}
+
+	var ch string
+	if c.channel > 0 {
+		ch = ",channel=" + strconv.Itoa(c.channel)
+	}
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_source.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_source.go
new file mode 100644
index 0000000000..a3f36eaa67
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+ NextToken() Token
+ Skip()
+ More()
+ GetLine() int
+ GetCharPositionInLine() int
+ GetInputStream() CharStream
+ GetSourceName() string
+ setTokenFactory(factory TokenFactory)
+ GetTokenFactory() TokenFactory
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
new file mode 100644
index 0000000000..bf4ff6633e
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+ IntStream
+
+ LT(k int) Token
+ Reset()
+
+ Get(index int) Token
+ GetTokenSource() TokenSource
+ SetTokenSource(TokenSource)
+
+ GetAllText() string
+ GetTextFromInterval(Interval) string
+ GetTextFromRuleContext(RuleContext) string
+ GetTextFromTokens(Token, Token) string
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
new file mode 100644
index 0000000000..ccf59b465c
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
@@ -0,0 +1,662 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "fmt"
+)
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");
+// rewriter.insertAfter(u, "text after u");
+// System.out.println(rewriter.getText());
+//
+
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
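+// A rough Go equivalent of the first example above (a sketch only; NewMyLexer
+// and NewMyParser stand in for generated constructors, and error handling is
+// elided):
+//
+//	input, _ := antlr.NewFileStream("input")
+//	lex := NewMyLexer(input)
+//	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
+//	rewriter := antlr.NewTokenStreamRewriter(tokens)
+//	parser := NewMyParser(tokens)
+//	parser.StartRule()
+//	rewriter.InsertAfterDefault(5, "text to put after token 5")
+//	fmt.Println(rewriter.GetTextDefault())
+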
+const (
+ DefaultProgramName = "default"
+ ProgramInitSize = 100
+ MinTokenIndex = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instructionIndex int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ opName string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
+ return op.instructionIndex
+}
+
+func (op *BaseRewriteOperation) GetIndex() int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) GetText() string {
+ return op.text
+}
+
+func (op *BaseRewriteOperation) GetOpName() string {
+ return op.opName
+}
+
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
+ op.instructionIndex = val
+}
+
+func (op *BaseRewriteOperation) SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation) SetText(val string) {
+ op.text = val
+}
+
+func (op *BaseRewriteOperation) SetOpName(val string) {
+ op.opName = val
+}
+
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+	return fmt.Sprintf("<%s@%v:\"%s\">",
+ op.opName,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+
+}
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ opName: "InsertBeforeOp",
+ tokens: stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions
+// first and then the "insert before" instructions at same index. Implementation
+// of "insert after" is "insert before index+1".
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+	return &InsertAfterOp{
+		BaseRewriteOperation: BaseRewriteOperation{
+			index:  index + 1, // insert after token at index == insert before index+1
+			text:   text,
+			opName: "InsertAfterOp",
+			tokens: stream,
+		},
+	}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// ReplaceOp replaces the range of tokens from index x through y with the given
+// text, standing in for what would otherwise be (y-x)+1 separate operations.
+type ReplaceOp struct {
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ opName: "ReplaceOp",
+ tokens: stream,
+ },
+ LastIndex: to,
+ }
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%v\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ lastRewriteTokenIndexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+ },
+ lastRewriteTokenIndexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+ return tsr.tokens
+}
+
+// Rollback rolls back the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
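+//
+// For example, Rollback(DefaultProgramName, 3) keeps instructions 0..2 and
+// discards everything queued after them.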
+func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
+ is, ok := tsr.programs[programName]
+ if ok {
+ tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
+ tsr.Rollback(DefaultProgramName, instructionIndex)
+}
+
+// DeleteProgram resets the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
+ tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+ tsr.DeleteProgram(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+ tsr.InsertAfter(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
+ tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+ tsr.InsertBefore(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
+ tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
+ tsr.Replace(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
+ tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
+ tsr.ReplaceToken(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
+ tsr.Replace(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
+ tsr.Delete(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
+}
+
+func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
+ tsr.ReplaceToken(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
+ tsr.DeleteToken(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
+ i, ok := tsr.lastRewriteTokenIndexes[programName]
+ if !ok {
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
+ return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
+ tsr.lastRewriteTokenIndexes[programName] = i
+}
+
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
+ is := make([]RewriteOperation, 0, ProgramInitSize)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok {
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+
+// GetTextDefault returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
+ return tsr.GetText(
+ DefaultProgramName,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+
+// GetText returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
+ rewrites := tsr.programs[programName]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start, 0)
+ if len(rewrites) == 0 {
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i := start; i <= stop && i < tsr.tokens.Size(); {
+ op := indexToOp[i]
+ delete(indexToOp, i) // remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil {
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {
+ buf.WriteString(t.GetText())
+ }
+ i++ // move to next token
+ } else {
+ i = op.Execute(&buf) // execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer
+ // So, if they did an insertAfter(lastValidIndex, "foo"), include
+ // foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1 {
+		// Scan any remaining operations; those after the last token
+		// should be included (they will be inserts).
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
+ }
+ }
+ return buf.String()
+}
+
+// reduceToSingleOperationPerIndex combines operations and reports invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to the
+// same index need to be combined, etc.
+//
+// Here are the cases:
+//
+// I.i.u I.j.v leave alone, non-overlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this 'replace'.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the 'replace' range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// The func returns a map from token index to operation.
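+//
+// For example (an illustrative sketch), insertBefore(2, "x") followed by
+// replace(2, 4, "y") collapses to a single ReplaceOp covering 2..4 with text
+// "xy", and the insert is dropped from the instruction list.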
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
+ // WALK REPLACES
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ rop, ok := op.(*ReplaceOp)
+ if !ok {
+ continue
+ }
+ // Wipe prior inserts within range
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if iop.index == rop.index {
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instructionIndex] = nil
+ if rop.text != "" {
+ rop.text = iop.text + rop.text
+ } else {
+ rop.text = iop.text
+ }
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
+ // delete insert as it's a no-op.
+ rewrites[iop.instructionIndex] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
+ // delete replace as it's a no-op.
+ rewrites[prevop.instructionIndex] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint {
+ rewrites[prevop.instructionIndex] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ } else if !disjoint {
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok {
+ continue
+ }
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
+ if nextIop.index == iop.GetIndex() {
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if prevIop.index == iop.GetIndex() {
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instructionIndex] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if rop, ok := rewrites[j].(*ReplaceOp); ok {
+ if iop.GetIndex() == rop.index {
+ rop.text = iop.GetText() + rop.text
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
+
+// Local min and max helpers; the Go versions targeted by this runtime predate
+// the generic min and max built-ins.
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ } else {
+ return b
+ }
+}
+func min(a, b int) int {
+ if a < b {
+ return a
+ } else {
+ return b
+ }
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
new file mode 100644
index 0000000000..7b663bf849
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+type TraceListener struct {
+ parser *BaseParser
+}
+
+func NewTraceListener(parser *BaseParser) *TraceListener {
+ tl := new(TraceListener)
+ tl.parser = parser
+ return tl
+}
+
+func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
+}
+
+func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
+ fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
+
+func (t *TraceListener) VisitTerminal(node TerminalNode) {
+ fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
+}
+
+func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
+ fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/transition.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/transition.go
new file mode 100644
index 0000000000..313b0fc127
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/transition.go
@@ -0,0 +1,439 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific classes. The DFA transitions, on
+// the other hand, need to update the labels as they add transitions to
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(_, _, _ int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// AtomTransition
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ BaseTransition
+}
+
+func NewAtomTransition(target ATNState, label int) *AtomTransition {
+ t := &AtomTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionATOM,
+			label:             label,
+ isEpsilon: false,
+ },
+ }
+ t.intervalSet = t.makeLabel()
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, _, _ int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ BaseTransition
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+ return &RuleTransition{
+ BaseTransition: BaseTransition{
+ target: ruleStart,
+ isEpsilon: true,
+ serializationType: TransitionRULE,
+ },
+ ruleIndex: ruleIndex,
+ precedence: precedence,
+ followState: followState,
+ }
+}
+
+func (t *RuleTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ BaseTransition
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+ return &EpsilonTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionEPSILON,
+ isEpsilon: true,
+ },
+ outermostPrecedenceReturn: outermostPrecedenceReturn,
+ }
+}
+
+func (t *EpsilonTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ BaseTransition
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+ t := &RangeTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionRANGE,
+ isEpsilon: false,
+ },
+ start: start,
+ stop: stop,
+ }
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, _, _ int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
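+
+// For example (a sketch; state is any valid ATNState), a range transition over
+// 'a'..'z' matches any symbol in that inclusive range:
+//
+//	t := NewRangeTransition(state, 'a', 'z')
+//	_ = t.Matches('m', 0, 0x10FFFF) // true; min/max are ignored here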
+
+func (t *RangeTransition) String() string {
+ var sb strings.Builder
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(t.start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(t.stop))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+ return &BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ },
+ }
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ BaseAbstractPredicateTransition
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+ return &PredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPREDICATE,
+ isEpsilon: true,
+ },
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ predIndex: predIndex,
+ }
+}
+
+func (t *PredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ BaseTransition
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+ return &ActionTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionACTION,
+ isEpsilon: true,
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ actionIndex: actionIndex,
+ }
+}
+
+func (t *ActionTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+ t := &SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionSET,
+ },
+ }
+
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, _, _ int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+ t := &NotSetTransition{
+ SetTransition: SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionNOTSET,
+ },
+ },
+ }
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+ return &WildcardTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionWILDCARD,
+ },
+ }
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ BaseAbstractPredicateTransition
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+ return &PrecedencePredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPRECEDENCE,
+ isEpsilon: true,
+ },
+ },
+ precedence: precedence,
+ }
+}
+
+func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tree.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tree.go
new file mode 100644
index 0000000000..c288420fb2
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tree.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree has a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+
+var TreeInvalidInterval = NewInterval(-1, -2)
+
+type Tree interface {
+ GetParent() Tree
+ SetParent(Tree)
+ GetPayload() interface{}
+ GetChild(i int) Tree
+ GetChildCount() int
+ GetChildren() []Tree
+}
+
+type SyntaxTree interface {
+ Tree
+ GetSourceInterval() Interval
+}
+
+type ParseTree interface {
+ SyntaxTree
+ Accept(Visitor ParseTreeVisitor) interface{}
+ GetText() string
+ ToStringTree([]string, Recognizer) string
+}
+
+type RuleNode interface {
+ ParseTree
+ GetRuleContext() RuleContext
+}
+
+type TerminalNode interface {
+ ParseTree
+ GetSymbol() Token
+}
+
+type ErrorNode interface {
+ TerminalNode
+
+ errorNode()
+}
+
+type ParseTreeVisitor interface {
+ Visit(tree ParseTree) interface{}
+ VisitChildren(node RuleNode) interface{}
+ VisitTerminal(node TerminalNode) interface{}
+ VisitErrorNode(node ErrorNode) interface{}
+}
+
+type BaseParseTreeVisitor struct{}
+
+var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
+
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil }
+
+// TODO: Implement this?
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+ VisitTerminal(node TerminalNode)
+ VisitErrorNode(node ErrorNode)
+ EnterEveryRule(ctx ParserRuleContext)
+ ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+ parentCtx RuleContext
+ symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+ tn := new(TerminalNodeImpl)
+
+ tn.parentCtx = nil
+ tn.symbol = symbol
+
+ return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+ panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+ return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+ t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+ if t.symbol == nil {
+ return TreeInvalidInterval
+ }
+ tokenIndex := t.symbol.GetTokenIndex()
+ return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+ return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+ if t.symbol.GetTokenType() == TokenEOF {
+ return ""
+ }
+
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+ return t.String()
+}
+
+// Represents a token that was consumed during reSynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+ *TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+ en := new(ErrorNodeImpl)
+ en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+ return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+ return new(ParseTreeWalker)
+}
+
+// Walk performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, [EnterRule] is called before
+// recursively walking down into child nodes, then [ExitRule] is called after the recursion unwinds.
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ switch tt := t.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ p.EnterRule(listener, t.(RuleNode))
+ for i := 0; i < t.GetChildCount(); i++ {
+ child := t.GetChild(i)
+ p.Walk(listener, child)
+ }
+ p.ExitRule(listener, t.(RuleNode))
+ }
+}
+
+// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule]
+// then by triggering the event specific to the given parse tree node
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+}
+
+// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event [ParseTreeListener].ExitEveryRule
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var ParseTreeWalkerDefault = NewParseTreeWalker()
+
+type IterativeParseTreeWalker struct {
+ *ParseTreeWalker
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
+ return new(IterativeParseTreeWalker)
+}
+
+func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ var stack []Tree
+ var indexStack []int
+ currentNode := t
+ currentIndex := 0
+
+ for currentNode != nil {
+ // pre-order visit
+ switch tt := currentNode.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ i.EnterRule(listener, currentNode.(RuleNode))
+ }
+ // Move down to first child, if exists
+ if currentNode.GetChildCount() > 0 {
+ stack = append(stack, currentNode)
+ indexStack = append(indexStack, currentIndex)
+ currentIndex = 0
+ currentNode = currentNode.GetChild(0)
+ continue
+ }
+
+ for {
+ // post-order visit
+ if ruleNode, ok := currentNode.(RuleNode); ok {
+ i.ExitRule(listener, ruleNode)
+ }
+ // No parent, so no siblings
+ if len(stack) == 0 {
+ currentNode = nil
+ currentIndex = 0
+ break
+ }
+ // Move to next sibling if possible
+ currentIndex++
+ if stack[len(stack)-1].GetChildCount() > currentIndex {
+ currentNode = stack[len(stack)-1].GetChild(currentIndex)
+ break
+ }
+			// No next sibling, so move up
+ currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
+ currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
+ }
+ }
+}
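
The walker above is the extension point for user listeners. Below is a minimal sketch (not part of the vendored file; `countingListener` is a hypothetical type) showing how a listener with a few overridden hooks plugs into `ParseTreeWalkerDefault`:

```go
package sketch

import "github.com/antlr4-go/antlr/v4"

// countingListener overrides only the hooks it cares about; the embedded
// BaseParseTreeListener supplies no-op defaults for the rest.
type countingListener struct {
	antlr.BaseParseTreeListener
	rules, tokens int
}

func (l *countingListener) EnterEveryRule(_ antlr.ParserRuleContext) { l.rules++ }
func (l *countingListener) VisitTerminal(_ antlr.TerminalNode)       { l.tokens++ }

// countNodes walks the tree depth-first: EnterEveryRule fires before a
// rule's children are visited, ExitEveryRule after the recursion unwinds.
func countNodes(tree antlr.ParseTree) (rules, tokens int) {
	l := &countingListener{}
	antlr.ParseTreeWalkerDefault.Walk(l, tree)
	return l.rules, l.tokens
}
```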
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trees.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trees.go
new file mode 100644
index 0000000000..f44c05d811
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trees.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+/** A set of utility routines useful for all kinds of ANTLR trees. */
+
+// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
+// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
+func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
+
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ s := TreesGetNodeText(tree, ruleNames, nil)
+
+ s = EscapeWhitespace(s, false)
+ c := tree.GetChildCount()
+ if c == 0 {
+ return s
+ }
+ res := "(" + s + " "
+ if c > 0 {
+ s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
+ res += s
+ }
+ for i := 1; i < c; i++ {
+ s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
+ res += " " + s
+ }
+ res += ")"
+ return res
+}
+
+func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ if ruleNames != nil {
+ switch t2 := t.(type) {
+ case RuleNode:
+ t3 := t2.GetRuleContext()
+ altNumber := t3.GetAltNumber()
+
+ if altNumber != ATNInvalidAltNumber {
+ return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
+ }
+ return ruleNames[t3.GetRuleIndex()]
+ case ErrorNode:
+ return fmt.Sprint(t2)
+ case TerminalNode:
+ if t2.GetSymbol() != nil {
+ return t2.GetSymbol().GetText()
+ }
+ }
+ }
+
+	// no recognizer available for rule names; fall back to the payload
+ payload := t.GetPayload()
+ if p2, ok := payload.(Token); ok {
+ return p2.GetText()
+ }
+
+ return fmt.Sprint(t.GetPayload())
+}
+
+// TreesGetChildren returns an ordered list of all children of this node
+//
+//goland:noinspection GoUnusedExportedFunction
+func TreesGetChildren(t Tree) []Tree {
+ list := make([]Tree, 0)
+ for i := 0; i < t.GetChildCount(); i++ {
+ list = append(list, t.GetChild(i))
+ }
+ return list
+}
+
+// TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root,
+// and the last node is the parent of this node.
+//
+//goland:noinspection GoUnusedExportedFunction
+func TreesgetAncestors(t Tree) []Tree {
+ ancestors := make([]Tree, 0)
+ t = t.GetParent()
+ for t != nil {
+ f := []Tree{t}
+ ancestors = append(f, ancestors...)
+ t = t.GetParent()
+ }
+ return ancestors
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
+ return TreesfindAllNodes(t, ttype, true)
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
+ return TreesfindAllNodes(t, ruleIndex, false)
+}
+
+func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
+ nodes := make([]ParseTree, 0)
+ treesFindAllNodes(t, index, findTokens, &nodes)
+ return nodes
+}
+
+func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
+ // check this node (the root) first
+
+ t2, ok := t.(TerminalNode)
+ t3, ok2 := t.(ParserRuleContext)
+
+ if findTokens && ok {
+ if t2.GetSymbol().GetTokenType() == index {
+ *nodes = append(*nodes, t2)
+ }
+ } else if !findTokens && ok2 {
+ if t3.GetRuleIndex() == index {
+ *nodes = append(*nodes, t3)
+ }
+ }
+ // check children
+ for i := 0; i < t.GetChildCount(); i++ {
+ treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
+ }
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TreesDescendants(t ParseTree) []ParseTree {
+ nodes := []ParseTree{t}
+ for i := 0; i < t.GetChildCount(); i++ {
+ nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
+ }
+ return nodes
+}
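
For orientation, here is a short sketch of how these helpers combine (illustrative only; `idTokenType` stands in for a token-type constant from a generated lexer):

```go
package sketch

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// inspect dumps a parse tree in LISP form and then lists the text of every
// terminal whose token type matches idTokenType. Rule names are taken from
// the parser when it is non-nil.
func inspect(tree antlr.ParseTree, p antlr.Parser, idTokenType int) {
	fmt.Println(antlr.TreesStringTree(tree, nil, p))
	for _, n := range antlr.TreesFindAllTokenNodes(tree, idTokenType) {
		fmt.Println(n.GetText())
	}
}
```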
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go
new file mode 100644
index 0000000000..733d7df9dc
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go
@@ -0,0 +1,328 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+func intMin(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func intMax(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// A simple integer stack
+
+type IntStack []int
+
+var ErrEmptyStack = errors.New("stack is empty")
+
+func (s *IntStack) Pop() (int, error) {
+ l := len(*s) - 1
+ if l < 0 {
+ return 0, ErrEmptyStack
+ }
+ v := (*s)[l]
+ *s = (*s)[0:l]
+ return v, nil
+}
+
+func (s *IntStack) Push(e int) {
+ *s = append(*s, e)
+}
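
A quick demonstration of the stack's error contract, which returns `ErrEmptyStack` instead of panicking (sketch; the type and the error are exported, so this works from outside the package, imports elided):

```go
var s antlr.IntStack
s.Push(1)
s.Push(2)
v, _ := s.Pop() // v == 2
s.Pop()         // pops 1; the stack is now empty
if _, err := s.Pop(); err != nil {
	fmt.Println(err) // "stack is empty"
}
_ = v
```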
+
+const bitsPerWord = 64
+
+func indexForBit(bit int) int {
+ return bit / bitsPerWord
+}
+
+//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
+func wordForBit(data []uint64, bit int) uint64 {
+ idx := indexForBit(bit)
+ if idx >= len(data) {
+ return 0
+ }
+ return data[idx]
+}
+
+func maskForBit(bit int) uint64 {
+ return uint64(1) << (bit % bitsPerWord)
+}
+
+func wordsNeeded(bit int) int {
+ return indexForBit(bit) + 1
+}
+
+type BitSet struct {
+ data []uint64
+}
+
+// NewBitSet creates a new bitwise set
+// TODO: See if we can replace with the standard library's BitSet
+func NewBitSet() *BitSet {
+ return &BitSet{}
+}
+
+func (b *BitSet) add(value int) {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ size := wordsNeeded(value)
+ data := make([]uint64, size)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data[idx] |= maskForBit(value)
+}
+
+func (b *BitSet) clear(index int) {
+ idx := indexForBit(index)
+ if idx >= len(b.data) {
+ return
+ }
+ b.data[idx] &= ^maskForBit(index)
+}
+
+func (b *BitSet) or(set *BitSet) {
+ // Get min size necessary to represent the bits in both sets.
+ bLen := b.minLen()
+ setLen := set.minLen()
+ maxLen := intMax(bLen, setLen)
+ if maxLen > len(b.data) {
+ // Increase the size of len(b.data) to represent the bits in both sets.
+ data := make([]uint64, maxLen)
+ copy(data, b.data)
+ b.data = data
+ }
+ // len(b.data) is at least setLen.
+ for i := 0; i < setLen; i++ {
+ b.data[i] |= set.data[i]
+ }
+}
+
+func (b *BitSet) remove(value int) {
+ b.clear(value)
+}
+
+func (b *BitSet) contains(value int) bool {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ return false
+ }
+ return (b.data[idx] & maskForBit(value)) != 0
+}
+
+func (b *BitSet) minValue() int {
+ for i, v := range b.data {
+ if v == 0 {
+ continue
+ }
+ return i*bitsPerWord + bits.TrailingZeros64(v)
+ }
+	return 2147483647 // math.MaxInt32: sentinel meaning "no bits set"
+}
+
+func (b *BitSet) equals(other interface{}) bool {
+ otherBitSet, ok := other.(*BitSet)
+ if !ok {
+ return false
+ }
+
+ if b == otherBitSet {
+ return true
+ }
+
+	// We only compare set bits, so we cannot rely on the two slices having the same size. It's
+	// possible for two BitSets to have different slice lengths but the same set bits. So we only
+ // compare the relevant words and ignore the trailing zeros.
+ bLen := b.minLen()
+ otherLen := otherBitSet.minLen()
+
+ if bLen != otherLen {
+ return false
+ }
+
+ for i := 0; i < bLen; i++ {
+ if b.data[i] != otherBitSet.data[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *BitSet) minLen() int {
+ for i := len(b.data); i > 0; i-- {
+ if b.data[i-1] != 0 {
+ return i
+ }
+ }
+ return 0
+}
+
+func (b *BitSet) length() int {
+ cnt := 0
+ for _, val := range b.data {
+ cnt += bits.OnesCount64(val)
+ }
+ return cnt
+}
+
+func (b *BitSet) String() string {
+ vals := make([]string, 0, b.length())
+
+ for i, v := range b.data {
+ for v != 0 {
+ n := bits.TrailingZeros64(v)
+ vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
+ v &= ^(uint64(1) << n)
+ }
+ }
+
+ return "{" + strings.Join(vals, ", ") + "}"
+}
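
Because the mutators are unexported, the following sketch would have to live inside package antlr; it illustrates the lazy word growth and the or-merge:

```go
func bitSetDemo() {
	a := NewBitSet()
	a.add(3)
	a.add(70) // bit 70 needs a second uint64 word, so the backing slice grows

	b := NewBitSet()
	b.add(3)
	b.or(a) // widens b's slice to cover a's highest set word, then ORs word-wise

	fmt.Println(b.contains(70)) // true
	fmt.Println(b.String())     // {3, 70}
	fmt.Println(b.minValue())   // 3
}
```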
+
+type AltDict struct {
+ data map[string]interface{}
+}
+
+func NewAltDict() *AltDict {
+ d := new(AltDict)
+ d.data = make(map[string]interface{})
+ return d
+}
+
+func (a *AltDict) Get(key string) interface{} {
+ key = "k-" + key
+ return a.data[key]
+}
+
+func (a *AltDict) put(key string, value interface{}) {
+ key = "k-" + key
+ a.data[key] = value
+}
+
+func (a *AltDict) values() []interface{} {
+ vs := make([]interface{}, len(a.data))
+ i := 0
+ for _, v := range a.data {
+ vs[i] = v
+ i++
+ }
+ return vs
+}
+
+func EscapeWhitespace(s string, escapeSpaces bool) string {
+
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ if escapeSpaces {
+ s = strings.Replace(s, " ", "\u00B7", -1)
+ }
+ return s
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func PrintArrayJavaStyle(sa []string) string {
+ var buffer bytes.Buffer
+
+ buffer.WriteString("[")
+
+ for i, s := range sa {
+ buffer.WriteString(s)
+ if i != len(sa)-1 {
+ buffer.WriteString(", ")
+ }
+ }
+
+ buffer.WriteString("]")
+
+ return buffer.String()
+}
+
+// murmur hash
+func murmurInit(seed int) int {
+ return seed
+}
+
+func murmurUpdate(h int, value int) int {
+ const c1 uint32 = 0xCC9E2D51
+ const c2 uint32 = 0x1B873593
+ const r1 uint32 = 15
+ const r2 uint32 = 13
+ const m uint32 = 5
+ const n uint32 = 0xE6546B64
+
+ k := uint32(value)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ hash := uint32(h) ^ k
+ hash = (hash << r2) | (hash >> (32 - r2))
+ hash = hash*m + n
+ return int(hash)
+}
+
+func murmurFinish(h int, numberOfWords int) int {
+ var hash = uint32(h)
+ hash ^= uint32(numberOfWords) << 2
+ hash ^= hash >> 16
+ hash *= 0x85ebca6b
+ hash ^= hash >> 13
+ hash *= 0xc2b2ae35
+ hash ^= hash >> 16
+
+ return int(hash)
+}
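
The three helpers above follow the usual MurmurHash3-style init/update/finish protocol used throughout the ATN code; a package-internal sketch hashing three words:

```go
func murmurDemo() int {
	h := murmurInit(0) // seed
	for _, w := range []int{1, 2, 3} {
		h = murmurUpdate(h, w) // mix each word into the running hash
	}
	return murmurFinish(h, 3) // finalize; 3 is the number of words mixed in
}
```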
+
+func isDirectory(dir string) (bool, error) {
+ fileInfo, err := os.Stat(dir)
+ if err != nil {
+ switch {
+ case errors.Is(err, syscall.ENOENT):
+ // The given directory does not exist, so we will try to create it
+ //
+ err = os.MkdirAll(dir, 0755)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+ case err != nil:
+ return false, err
+ default:
+ }
+ }
+ return fileInfo.IsDir(), err
+}
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/.travis.yml b/hack/tools/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 0000000000..e29f8eef5e
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+
+notifications:
+ email:
+ - bwatas@gmail.com
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/hack/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 0000000000..f0f7e3a8ad
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, there are some features and functions that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants should clearly describe what they hold
+- Public functions must be documented and described in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/LICENSE b/hack/tools/vendor/github.com/asaskevich/govalidator/LICENSE
new file mode 100644
index 0000000000..2f9a31fadf
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/README.md b/hack/tools/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 0000000000..40f9a87811
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,507 @@
+govalidator
+===========
+[](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [](https://godoc.org/github.com/asaskevich/govalidator) [](https://coveralls.io/r/asaskevich/govalidator?branch=master) [](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
+[](https://travis-ci.org/asaskevich/govalidator) [](https://goreportcard.com/report/github.com/asaskevich/govalidator) [](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [](#backers) [](#sponsors) [](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+ go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package with `gopkg.in`:
+
+ go get gopkg.in/asaskevich/govalidator.v4
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add following line in your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you don't want to write out the long `govalidator` package name, you can alias the import:
+```go
+import (
+ valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between the `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+ govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+ Name string ``
+ Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct3 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email,optional"`
+}
+```
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs, this is the object being validated – this makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
+ // ...
+})
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
+ // ...
+}))
+```
+
+#### List of functions:
+```go
+func Abs(value float64) float64
+func BlackList(str, chars string) string
+func ByteLength(str string, params ...string) bool
+func CamelCaseToUnderscore(str string) string
+func Contains(str, substring string) bool
+func Count(array []interface{}, iterator ConditionIterator) int
+func Each(array []interface{}, iterator Iterator)
+func ErrorByField(e error, field string) string
+func ErrorsByField(e error) map[string]string
+func Filter(array []interface{}, iterator ConditionIterator) []interface{}
+func Find(array []interface{}, iterator ConditionIterator) interface{}
+func GetLine(s string, index int) (string, error)
+func GetLines(s string) []string
+func InRange(value, left, right float64) bool
+func IsASCII(str string) bool
+func IsAlpha(str string) bool
+func IsAlphanumeric(str string) bool
+func IsBase64(str string) bool
+func IsByteLength(str string, min, max int) bool
+func IsCIDR(str string) bool
+func IsCreditCard(str string) bool
+func IsDNSName(str string) bool
+func IsDataURI(str string) bool
+func IsDialString(str string) bool
+func IsDivisibleBy(str, num string) bool
+func IsEmail(str string) bool
+func IsFilePath(str string) (bool, int)
+func IsFloat(str string) bool
+func IsFullWidth(str string) bool
+func IsHalfWidth(str string) bool
+func IsHexadecimal(str string) bool
+func IsHexcolor(str string) bool
+func IsHost(str string) bool
+func IsIP(str string) bool
+func IsIPv4(str string) bool
+func IsIPv6(str string) bool
+func IsISBN(str string, version int) bool
+func IsISBN10(str string) bool
+func IsISBN13(str string) bool
+func IsISO3166Alpha2(str string) bool
+func IsISO3166Alpha3(str string) bool
+func IsISO693Alpha2(str string) bool
+func IsISO693Alpha3b(str string) bool
+func IsISO4217(str string) bool
+func IsIn(str string, params ...string) bool
+func IsInt(str string) bool
+func IsJSON(str string) bool
+func IsLatitude(str string) bool
+func IsLongitude(str string) bool
+func IsLowerCase(str string) bool
+func IsMAC(str string) bool
+func IsMongoID(str string) bool
+func IsMultibyte(str string) bool
+func IsNatural(value float64) bool
+func IsNegative(value float64) bool
+func IsNonNegative(value float64) bool
+func IsNonPositive(value float64) bool
+func IsNull(str string) bool
+func IsNumeric(str string) bool
+func IsPort(str string) bool
+func IsPositive(value float64) bool
+func IsPrintableASCII(str string) bool
+func IsRFC3339(str string) bool
+func IsRFC3339WithoutZone(str string) bool
+func IsRGBcolor(str string) bool
+func IsRequestURI(rawurl string) bool
+func IsRequestURL(rawurl string) bool
+func IsSSN(str string) bool
+func IsSemver(str string) bool
+func IsTime(str string, format string) bool
+func IsURL(str string) bool
+func IsUTFDigit(str string) bool
+func IsUTFLetter(str string) bool
+func IsUTFLetterNumeric(str string) bool
+func IsUTFNumeric(str string) bool
+func IsUUID(str string) bool
+func IsUUIDv3(str string) bool
+func IsUUIDv4(str string) bool
+func IsUUIDv5(str string) bool
+func IsUpperCase(str string) bool
+func IsVariableWidth(str string) bool
+func IsWhole(value float64) bool
+func LeftTrim(str, chars string) string
+func Map(array []interface{}, iterator ResultIterator) []interface{}
+func Matches(str, pattern string) bool
+func NormalizeEmail(str string) (string, error)
+func PadBoth(str string, padStr string, padLen int) string
+func PadLeft(str string, padStr string, padLen int) string
+func PadRight(str string, padStr string, padLen int) string
+func Range(str string, params ...string) bool
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func RuneLength(str string, params ...string) bool
+func SafeFileName(str string) string
+func SetFieldsRequiredByDefault(value bool)
+func Sign(value float64) float64
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(value interface{}) (int64, error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func UnderscoreToCamelCase(s string) string
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type CustomTypeValidator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+func (es Errors) Errors() []error
+type ISO3166Entry
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### ToString
+```go
+type User struct {
+ FirstName string
+ LastName string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls Iterator for every item
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+ println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+ return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+ return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn) // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag for any field in your structure. All validators used with a field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
+
+Here is a list of available validators for struct fields (validator - used function):
+```go
+"email": IsEmail,
+"url": IsURL,
+"dialstring": IsDialString,
+"requrl": IsRequestURL,
+"requri": IsRequestURI,
+"alpha": IsAlpha,
+"utfletter": IsUTFLetter,
+"alphanum": IsAlphanumeric,
+"utfletternum": IsUTFLetterNumeric,
+"numeric": IsNumeric,
+"utfnumeric": IsUTFNumeric,
+"utfdigit": IsUTFDigit,
+"hexadecimal": IsHexadecimal,
+"hexcolor": IsHexcolor,
+"rgbcolor": IsRGBcolor,
+"lowercase": IsLowerCase,
+"uppercase": IsUpperCase,
+"int": IsInt,
+"float": IsFloat,
+"null": IsNull,
+"uuid": IsUUID,
+"uuidv3": IsUUIDv3,
+"uuidv4": IsUUIDv4,
+"uuidv5": IsUUIDv5,
+"creditcard": IsCreditCard,
+"isbn10": IsISBN10,
+"isbn13": IsISBN13,
+"json": IsJSON,
+"multibyte": IsMultibyte,
+"ascii": IsASCII,
+"printableascii": IsPrintableASCII,
+"fullwidth": IsFullWidth,
+"halfwidth": IsHalfWidth,
+"variablewidth": IsVariableWidth,
+"base64": IsBase64,
+"datauri": IsDataURI,
+"ip": IsIP,
+"port": IsPort,
+"ipv4": IsIPv4,
+"ipv6": IsIPv6,
+"dns": IsDNSName,
+"host": IsHost,
+"mac": IsMAC,
+"latitude": IsLatitude,
+"longitude": IsLongitude,
+"ssn": IsSSN,
+"semver": IsSemver,
+"rfc3339": IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2": IsISO3166Alpha2,
+"ISO3166Alpha3": IsISO3166Alpha3,
+```
+Validators with parameters
+
+```go
+"range(min|max)": Range,
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLength,
+"stringlength(min|max)": StringLength,
+"matches(pattern)": StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+"rsapub(keylength)" : IsRsaPub,
+```
+
+And here is a small example of usage:
+```go
+type Post struct {
+ Title string `valid:"alphanum,required"`
+ Message string `valid:"duck,ascii"`
+ Message2 string `valid:"animal(dog)"`
+ AuthorIP string `valid:"ipv4"`
+ Date string `valid:"-"`
+}
+post := &Post{
+ Title: "My Example Post",
+ Message: "duck",
+ Message2: "dog",
+ AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+
+// Add your own struct validation tags with parameter
+govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
+ species := params[0]
+ return str == species
+})
+govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### WhiteList
+```go
+// Remove all characters from the string except those in the range "a" to "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain-specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+ ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+ Email string `valid:"email"`
+ CustomMinLength int `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // you can type switch on the context interface being validated
+ case StructWithCustomByteArray:
+ // you can check and validate against some other field in the context,
+ // return early or not validate against the context at all – your choice
+ case SomeOtherType:
+ // ...
+ default:
+ // expecting some other type? Throw/panic here or continue
+ }
+
+ switch v := i.(type) { // type switch on the struct field being validated
+ case CustomByteArray:
+ for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+ if e != 0 {
+ return true
+ }
+ }
+ }
+ return false
+}))
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+ case StructWithCustomByteArray:
+ return len(v.ID) >= v.CustomMinLength
+ }
+ return false
+}))
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+ Id int64 `json:"id"`
+ FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
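
As an illustrative follow-up (not part of the original README), validating a `Ticket` with a blank `FirstName` surfaces the custom message instead of the generated one:

```go
t := Ticket{Id: 1}
if _, err := govalidator.ValidateStruct(t); err != nil {
	println(err.Error()) // contains "First name is blank"
}
```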
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, there are some features and functions that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants should clearly describe what they hold
+- Public functions must be documented and described in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## License
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
\ No newline at end of file
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/arrays.go b/hack/tools/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 0000000000..5bace2654d
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,58 @@
+package govalidator
+
+// Iterator is a function that accepts an element of a slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is a function that accepts an element of a slice/array and its index and returns a result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is a function that accepts an element of a slice/array and its index and returns a boolean
+type ConditionIterator func(interface{}, int) bool
+
+// Each iterates over the slice and applies the Iterator to every item
+func Each(array []interface{}, iterator Iterator) {
+ for index, data := range array {
+ iterator(data, index)
+ }
+}
+
+// Map iterates over the slice and applies the ResultIterator to every item. Returns a new slice with the results.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+ var result = make([]interface{}, len(array))
+ for index, data := range array {
+ result[index] = iterator(data, index)
+ }
+ return result
+}
+
+// Find iterates over the slice and applies the ConditionIterator to every item. Returns the first item that meets the ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+ for index, data := range array {
+ if iterator(data, index) {
+ return data
+ }
+ }
+ return nil
+}
+
+// Filter iterates over the slice and applies the ConditionIterator to every item. Returns a new slice containing the matching items.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+ var result = make([]interface{}, 0)
+ for index, data := range array {
+ if iterator(data, index) {
+ result = append(result, data)
+ }
+ }
+ return result
+}
+
+// Count iterates over the slice and applies the ConditionIterator to every item. Returns the count of items that meet the ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+ count := 0
+ for index, data := range array {
+ if iterator(data, index) {
+ count = count + 1
+ }
+ }
+ return count
+}
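
One helper in this file that the README examples skip is `Find`, which returns the first matching element or nil; a minimal sketch:

```go
data := []interface{}{1, 2, 3, 4, 5}
first := govalidator.Find(data, func(value interface{}, index int) bool {
	return value.(int) > 3
})
// first == 4; a predicate that matches nothing yields nil
```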
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/converter.go b/hack/tools/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 0000000000..cf1e5d569b
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,64 @@
+package govalidator
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+ res := fmt.Sprintf("%v", obj)
+ return string(res)
+}
+
+// ToJSON converts the input to a valid JSON string
+func ToJSON(obj interface{}) (string, error) {
+ res, err := json.Marshal(obj)
+ if err != nil {
+ res = []byte("")
+ }
+ return string(res), err
+}
+
+// ToFloat converts the input string to a float, or 0.0 if the input is not a float.
+func ToFloat(str string) (float64, error) {
+ res, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ res = 0.0
+ }
+ return res, err
+}
+
+// ToInt converts the input string or any integer type to an int64, or 0 if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+ val := reflect.ValueOf(value)
+
+ switch value.(type) {
+ case int, int8, int16, int32, int64:
+ res = val.Int()
+ case uint, uint8, uint16, uint32, uint64:
+ res = int64(val.Uint())
+ case string:
+ if IsInt(val.String()) {
+ res, err = strconv.ParseInt(val.String(), 0, 64)
+ if err != nil {
+ res = 0
+ }
+ } else {
+			err = fmt.Errorf("ToInt: invalid numeric string %q", value)
+ res = 0
+ }
+ default:
+		err = fmt.Errorf("ToInt: unsupported type %T", value)
+ res = 0
+ }
+
+ return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+ return strconv.ParseBool(str)
+}
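
A behavior sketch for `ToInt` across the supported input kinds (assuming the package is imported as `govalidator`):

```go
n, _ := govalidator.ToInt("42")     // 42
n, _ = govalidator.ToInt(int8(-7))  // -7: any signed or unsigned integer kind is accepted
n, _ = govalidator.ToInt(uint32(9)) // 9
_, err := govalidator.ToInt("4.2")  // 0 with a non-nil error: "4.2" fails IsInt
_, _ = n, err
```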
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/error.go b/hack/tools/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 0000000000..655b750cb8
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,43 @@
+package govalidator
+
+import "strings"
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+ return es
+}
+
+func (es Errors) Error() string {
+ var errs []string
+ for _, e := range es {
+ errs = append(errs, e.Error())
+ }
+ return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
+type Error struct {
+ Name string
+ Err error
+ CustomErrorMessageExists bool
+
+ // Validator indicates the name of the validator that failed
+ Validator string
+ Path []string
+}
+
+func (e Error) Error() string {
+ if e.CustomErrorMessageExists {
+ return e.Err.Error()
+ }
+
+ errName := e.Name
+ if len(e.Path) > 0 {
+ errName = strings.Join(append(e.Path, e.Name), ".")
+ }
+
+ return errName + ": " + e.Err.Error()
+}
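
A sketch of how `Path` and `Name` combine in the rendered message (imports assumed):

```go
e := govalidator.Error{
	Name: "Email",
	Err:  errors.New("invalid email"),
	Path: []string{"User", "Contact"},
}
fmt.Println(e.Error()) // User.Contact.Email: invalid email

es := govalidator.Errors{e, errors.New("other failure")}
fmt.Println(es.Error()) // the two messages joined with ";"
```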
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/numerics.go b/hack/tools/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 0000000000..7e6c652e14
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,97 @@
+package govalidator
+
+import (
+ "math"
+ "reflect"
+)
+
+// Abs returns the absolute value of the number
+func Abs(value float64) float64 {
+ return math.Abs(value)
+}
+
+// Sign returns the signum of the number: 1 if value > 0, -1 if value < 0, 0 otherwise
+func Sign(value float64) float64 {
+ if value > 0 {
+ return 1
+ } else if value < 0 {
+ return -1
+ } else {
+ return 0
+ }
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+ return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+ return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+ return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+ return value <= 0
+}
+
+// InRangeInt returns true if value lies between the left and right borders
+func InRangeInt(value, left, right interface{}) bool {
+ value64, _ := ToInt(value)
+ left64, _ := ToInt(left)
+ right64, _ := ToInt(right)
+ if left64 > right64 {
+ left64, right64 = right64, left64
+ }
+ return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between the left and right borders
+func InRangeFloat32(value, left, right float32) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between the left and right borders
+func InRangeFloat64(value, left, right float64) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRange returns true if value lies between the left and right borders; it handles int, float32, and float64, and all three arguments must be of the same type
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+
+ reflectValue := reflect.TypeOf(value).Kind()
+ reflectLeft := reflect.TypeOf(left).Kind()
+ reflectRight := reflect.TypeOf(right).Kind()
+
+ if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
+ return InRangeInt(value.(int), left.(int), right.(int))
+ } else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
+ return InRangeFloat32(value.(float32), left.(float32), right.(float32))
+ } else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
+ return InRangeFloat64(value.(float64), left.(float64), right.(float64))
+ } else {
+ return false
+ }
+}
+
+// IsWhole returns true if value is a whole number
+func IsWhole(value float64) bool {
+ return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole)
+func IsNatural(value float64) bool {
+ return IsWhole(value) && IsPositive(value)
+}
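
The kind dispatch in `InRange` is strict: all three arguments must share one of the three supported kinds, and anything else falls through to false (sketch):

```go
govalidator.InRange(5, 1, 10)       // true: all int
govalidator.InRange(5.0, 1.0, 10.0) // true: all float64
govalidator.InRange(5, 1.0, 10)     // false: mixed int and float64
govalidator.InRangeInt(10, 20, 5)   // true: borders are swapped when left > right
```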
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/patterns.go b/hack/tools/vendor/github.com/asaskevich/govalidator/patterns.go
new file mode 100644
index 0000000000..61a05d438e
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -0,0 +1,101 @@
+package govalidator
+
+import "regexp"
+
+// Basic regular expressions for validating strings
+const (
+ Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
+ ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
+ ISBN13 string = "^(?:[0-9]{13})$"
+ UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ Alpha string = "^[a-zA-Z]+$"
+ Alphanumeric string = "^[a-zA-Z0-9]+$"
+ Numeric string = "^[0-9]+$"
+ Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
+ Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
+ Hexadecimal string = "^[0-9a-fA-F]+$"
+ Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
+ ASCII string = "^[\x00-\x7F]+$"
+ Multibyte string = "[^\x00-\x7F]"
+ FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ PrintableASCII string = "^[\x20-\x7E]+$"
+ DataURI string = "^data:.+\\/(.+);base64$"
+ Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
+ IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
+ URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
+ URLUsername string = `(\S+(:\S*)?@)`
+ URLPath string = `((\/|\?|#)[^\s]*)`
+ URLPort string = `(:(\d{1,5}))`
+ URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
+ URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
+ URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
+ SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
+ WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixPath string = `^(/[^/\x00]*)+/?$`
+ Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
+ tagName string = "valid"
+ hasLowerCase string = ".*[[:lower:]]"
+ hasUpperCase string = ".*[[:upper:]]"
+ hasWhitespace string = ".*[[:space:]]"
+ hasWhitespaceOnly string = "^[[:space:]]+$"
+)
+
+// Used by IsFilePath func
+const (
+ // Unknown is unresolved OS type
+ Unknown = iota
+ // Win is Windows type
+ Win
+ // Unix is *nix OS types
+ Unix
+)
+
+var (
+ userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
+ hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
+ userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
+ rxEmail = regexp.MustCompile(Email)
+ rxCreditCard = regexp.MustCompile(CreditCard)
+ rxISBN10 = regexp.MustCompile(ISBN10)
+ rxISBN13 = regexp.MustCompile(ISBN13)
+ rxUUID3 = regexp.MustCompile(UUID3)
+ rxUUID4 = regexp.MustCompile(UUID4)
+ rxUUID5 = regexp.MustCompile(UUID5)
+ rxUUID = regexp.MustCompile(UUID)
+ rxAlpha = regexp.MustCompile(Alpha)
+ rxAlphanumeric = regexp.MustCompile(Alphanumeric)
+ rxNumeric = regexp.MustCompile(Numeric)
+ rxInt = regexp.MustCompile(Int)
+ rxFloat = regexp.MustCompile(Float)
+ rxHexadecimal = regexp.MustCompile(Hexadecimal)
+ rxHexcolor = regexp.MustCompile(Hexcolor)
+ rxRGBcolor = regexp.MustCompile(RGBcolor)
+ rxASCII = regexp.MustCompile(ASCII)
+ rxPrintableASCII = regexp.MustCompile(PrintableASCII)
+ rxMultibyte = regexp.MustCompile(Multibyte)
+ rxFullWidth = regexp.MustCompile(FullWidth)
+ rxHalfWidth = regexp.MustCompile(HalfWidth)
+ rxBase64 = regexp.MustCompile(Base64)
+ rxDataURI = regexp.MustCompile(DataURI)
+ rxLatitude = regexp.MustCompile(Latitude)
+ rxLongitude = regexp.MustCompile(Longitude)
+ rxDNSName = regexp.MustCompile(DNSName)
+ rxURL = regexp.MustCompile(URL)
+ rxSSN = regexp.MustCompile(SSN)
+ rxWinPath = regexp.MustCompile(WinPath)
+ rxUnixPath = regexp.MustCompile(UnixPath)
+ rxSemver = regexp.MustCompile(Semver)
+ rxHasLowerCase = regexp.MustCompile(hasLowerCase)
+ rxHasUpperCase = regexp.MustCompile(hasUpperCase)
+ rxHasWhitespace = regexp.MustCompile(hasWhitespace)
+ rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
+)
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/types.go b/hack/tools/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 0000000000..4f7e9274ad
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,636 @@
+package govalidator
+
+import (
+ "reflect"
+ "regexp"
+ "sort"
+ "sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+type tagOptionsMap map[string]tagOption
+
+func (t tagOptionsMap) orderedKeys() []string {
+ var keys []string
+ for k := range t {
+ keys = append(keys, k)
+ }
+
+ sort.Slice(keys, func(a, b int) bool {
+ return t[keys[a]].order < t[keys[b]].order
+ })
+
+ return keys
+}
+
+type tagOption struct {
+ name string
+ customErrorMessage string
+ order int
+}
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// ParamTagMap is a map of validator functions that accept additional parameters
+var ParamTagMap = map[string]ParamValidator{
+ "length": ByteLength,
+ "range": Range,
+ "runelength": RuneLength,
+ "stringlength": StringLength,
+ "matches": StringMatches,
+ "in": isInRaw,
+ "rsapub": IsRsaPub,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+ "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+ "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+ "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+ "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+ "in": regexp.MustCompile(`^in\((.*)\)`),
+ "matches": regexp.MustCompile(`^matches\((.+)\)$`),
+ "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+ validators map[string]CustomTypeValidator
+
+ sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+ tm.RLock()
+ defer tm.RUnlock()
+ v, ok := tm.validators[name]
+ return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+ tm.Lock()
+ defer tm.Unlock()
+ tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
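+
+// A minimal registration sketch (the tag name and validator body are
+// hypothetical): register the validator before calling ValidateStruct, then
+// reference it in a `valid` tag.
+//     type UUID [16]byte
+//     CustomTypeTagMap.Set("uuidNotZero", func(i interface{}, o interface{}) bool {
+//         u, ok := i.(UUID)
+//         return ok && u != UUID{}
+//     })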
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
+var TagMap = map[string]Validator{
+ "email": IsEmail,
+ "url": IsURL,
+ "dialstring": IsDialString,
+ "requrl": IsRequestURL,
+ "requri": IsRequestURI,
+ "alpha": IsAlpha,
+ "utfletter": IsUTFLetter,
+ "alphanum": IsAlphanumeric,
+ "utfletternum": IsUTFLetterNumeric,
+ "numeric": IsNumeric,
+ "utfnumeric": IsUTFNumeric,
+ "utfdigit": IsUTFDigit,
+ "hexadecimal": IsHexadecimal,
+ "hexcolor": IsHexcolor,
+ "rgbcolor": IsRGBcolor,
+ "lowercase": IsLowerCase,
+ "uppercase": IsUpperCase,
+ "int": IsInt,
+ "float": IsFloat,
+ "null": IsNull,
+ "uuid": IsUUID,
+ "uuidv3": IsUUIDv3,
+ "uuidv4": IsUUIDv4,
+ "uuidv5": IsUUIDv5,
+ "creditcard": IsCreditCard,
+ "isbn10": IsISBN10,
+ "isbn13": IsISBN13,
+ "json": IsJSON,
+ "multibyte": IsMultibyte,
+ "ascii": IsASCII,
+ "printableascii": IsPrintableASCII,
+ "fullwidth": IsFullWidth,
+ "halfwidth": IsHalfWidth,
+ "variablewidth": IsVariableWidth,
+ "base64": IsBase64,
+ "datauri": IsDataURI,
+ "ip": IsIP,
+ "port": IsPort,
+ "ipv4": IsIPv4,
+ "ipv6": IsIPv6,
+ "dns": IsDNSName,
+ "host": IsHost,
+ "mac": IsMAC,
+ "latitude": IsLatitude,
+ "longitude": IsLongitude,
+ "ssn": IsSSN,
+ "semver": IsSemver,
+ "rfc3339": IsRFC3339,
+ "rfc3339WithoutZone": IsRFC3339WithoutZone,
+ "ISO3166Alpha2": IsISO3166Alpha2,
+ "ISO3166Alpha3": IsISO3166Alpha3,
+ "ISO4217": IsISO4217,
+}
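+
+// A hedged usage sketch for TagMap entries (hypothetical struct): each tag
+// name maps to one of the Is* validators above.
+//     type contact struct {
+//         Email string `valid:"email"`
+//         Site  string `valid:"url,optional"`
+//     }
+//     ok, err := ValidateStruct(contact{Email: "user@example.com"})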
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+ EnglishShortName string
+ FrenchShortName string
+ Alpha2Code string
+ Alpha3Code string
+ Numeric string
+}
+
+// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/, code type "Officially Assigned Codes"
+var ISO3166List = []ISO3166Entry{
+ {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+ {"Albania", "Albanie (l')", "AL", "ALB", "008"},
+ {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+ {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
+ {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
+ {"Andorra", "Andorre (l')", "AD", "AND", "020"},
+ {"Angola", "Angola (l')", "AO", "AGO", "024"},
+ {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
+ {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
+ {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
+ {"Australia", "Australie (l')", "AU", "AUS", "036"},
+ {"Austria", "Autriche (l')", "AT", "AUT", "040"},
+ {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
+ {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
+ {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
+ {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
+ {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
+ {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
+ {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
+ {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
+ {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
+ {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
+ {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
+ {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
+ {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
+ {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
+ {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
+ {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
+ {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
+ {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
+ {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
+ {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
+ {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
+ {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
+ {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
+ {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
+ {"Canada", "Canada (le)", "CA", "CAN", "124"},
+ {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
+ {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
+ {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
+ {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
+ {"Chad", "Tchad (le)", "TD", "TCD", "148"},
+ {"Chile", "Chili (le)", "CL", "CHL", "152"},
+ {"China", "Chine (la)", "CN", "CHN", "156"},
+ {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
+ {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
+ {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
+ {"Colombia", "Colombie (la)", "CO", "COL", "170"},
+ {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
+ {"Mayotte", "Mayotte", "YT", "MYT", "175"},
+ {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
+ {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
+ {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
+ {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
+ {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
+ {"Cuba", "Cuba", "CU", "CUB", "192"},
+ {"Cyprus", "Chypre", "CY", "CYP", "196"},
+ {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
+ {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
+ {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
+ {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
+ {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
+ {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
+ {"El Salvador", "El Salvador", "SV", "SLV", "222"},
+ {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
+ {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
+ {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
+ {"Estonia", "Estonie (l')", "EE", "EST", "233"},
+ {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
+ {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
+ {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
+ {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
+ {"Finland", "Finlande (la)", "FI", "FIN", "246"},
+ {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
+ {"France", "France (la)", "FR", "FRA", "250"},
+ {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
+ {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
+ {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
+ {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
+ {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
+ {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
+ {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
+ {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
+ {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
+ {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
+ {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
+ {"Kiribati", "Kiribati", "KI", "KIR", "296"},
+ {"Greece", "Grèce (la)", "GR", "GRC", "300"},
+ {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
+ {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
+ {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
+ {"Guam", "Guam", "GU", "GUM", "316"},
+ {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
+ {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
+ {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
+ {"Haiti", "Haïti", "HT", "HTI", "332"},
+ {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
+ {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
+ {"Honduras", "Honduras (le)", "HN", "HND", "340"},
+ {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
+ {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
+ {"Iceland", "Islande (l')", "IS", "ISL", "352"},
+ {"India", "Inde (l')", "IN", "IND", "356"},
+ {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
+ {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
+ {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
+ {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
+ {"Israel", "Israël", "IL", "ISR", "376"},
+ {"Italy", "Italie (l')", "IT", "ITA", "380"},
+ {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
+ {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
+ {"Japan", "Japon (le)", "JP", "JPN", "392"},
+ {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
+ {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
+ {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
+ {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
+ {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
+ {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
+ {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
+ {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
+ {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
+ {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
+ {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
+ {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
+ {"Libya", "Libye (la)", "LY", "LBY", "434"},
+ {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
+ {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
+ {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
+ {"Macao", "Macao", "MO", "MAC", "446"},
+ {"Madagascar", "Madagascar", "MG", "MDG", "450"},
+ {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
+ {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
+ {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
+ {"Mali", "Mali (le)", "ML", "MLI", "466"},
+ {"Malta", "Malte", "MT", "MLT", "470"},
+ {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
+ {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
+ {"Mauritius", "Maurice", "MU", "MUS", "480"},
+ {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
+ {"Monaco", "Monaco", "MC", "MCO", "492"},
+ {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
+ {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
+ {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
+ {"Montserrat", "Montserrat", "MS", "MSR", "500"},
+ {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
+ {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
+ {"Oman", "Oman", "OM", "OMN", "512"},
+ {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
+ {"Nauru", "Nauru", "NR", "NRU", "520"},
+ {"Nepal", "Népal (le)", "NP", "NPL", "524"},
+ {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
+ {"Curaçao", "Curaçao", "CW", "CUW", "531"},
+ {"Aruba", "Aruba", "AW", "ABW", "533"},
+ {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
+ {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
+ {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
+ {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
+ {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
+ {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
+ {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
+ {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
+ {"Niue", "Niue", "NU", "NIU", "570"},
+ {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
+ {"Norway", "Norvège (la)", "NO", "NOR", "578"},
+ {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
+ {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
+ {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
+ {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
+ {"Palau", "Palaos (les)", "PW", "PLW", "585"},
+ {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
+ {"Panama", "Panama (le)", "PA", "PAN", "591"},
+ {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
+ {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
+ {"Peru", "Pérou (le)", "PE", "PER", "604"},
+ {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
+ {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
+ {"Poland", "Pologne (la)", "PL", "POL", "616"},
+ {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
+ {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
+ {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
+ {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
+ {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
+ {"Réunion", "Réunion (La)", "RE", "REU", "638"},
+ {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
+ {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
+ {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
+ {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
+ {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
+ {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
+ {"Anguilla", "Anguilla", "AI", "AIA", "660"},
+ {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
+ {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
+ {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
+ {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
+ {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
+ {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
+ {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
+ {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
+ {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
+ {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
+ {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
+ {"Singapore", "Singapour", "SG", "SGP", "702"},
+ {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
+ {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
+ {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
+ {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
+ {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
+ {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
+ {"Spain", "Espagne (l')", "ES", "ESP", "724"},
+ {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
+ {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
+ {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
+ {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
+ {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
+ {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
+ {"Sweden", "Suède (la)", "SE", "SWE", "752"},
+ {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
+ {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
+ {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
+ {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
+ {"Togo", "Togo (le)", "TG", "TGO", "768"},
+ {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
+ {"Tonga", "Tonga (les)", "TO", "TON", "776"},
+ {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
+ {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
+ {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
+ {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
+ {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
+ {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
+ {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
+ {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
+ {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
+ {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
+ {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
+ {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
+ {"Guernsey", "Guernesey", "GG", "GGY", "831"},
+ {"Jersey", "Jersey", "JE", "JEY", "832"},
+ {"Isle of Man", "Île de Man", "IM", "IMN", "833"},
+ {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
+ {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
+ {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
+ {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
+ {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
+ {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
+ {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
+ {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
+ {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
+ {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
+ {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
+}
+
+// ISO4217List is the list of ISO currency codes
+var ISO4217List = []string{
+ "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
+ "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
+ "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
+ "DJF", "DKK", "DOP", "DZD",
+ "EGP", "ERN", "ETB", "EUR",
+ "FJD", "FKP",
+ "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
+ "HKD", "HNL", "HRK", "HTG", "HUF",
+ "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
+ "JMD", "JOD", "JPY",
+ "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
+ "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
+ "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
+ "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
+ "OMR",
+ "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
+ "QAR",
+ "RON", "RSD", "RUB", "RWF",
+ "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
+ "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
+ "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS",
+ "VEF", "VND", "VUV",
+ "WST",
+ "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
+ "YER",
+ "ZAR", "ZMW", "ZWL",
+}
+
+// ISO693Entry stores ISO language codes
+type ISO693Entry struct {
+ Alpha3bCode string
+ Alpha2Code string
+ English string
+}
+
+// ISO693List is based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
+var ISO693List = []ISO693Entry{
+ {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
+ {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
+ {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
+ {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
+ {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
+ {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
+ {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
+ {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
+ {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
+ {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
+ {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
+ {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
+ {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
+ {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
+ {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
+ {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
+ {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
+ {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
+ {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
+ {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
+ {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
+ {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
+ {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
+ {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
+ {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
+ {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
+ {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
+ {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
+ {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
+ {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
+ {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
+ {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
+ {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
+ {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
+ {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
+ {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
+ {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
+ {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
+ {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
+ {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
+ {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
+ {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
+ {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
+ {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
+ {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
+ {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
+ {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
+ {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
+ {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
+ {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
+ {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
+ {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
+ {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
+ {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
+ {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
+ {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
+ {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
+ {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
+ {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
+ {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
+ {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
+ {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
+ {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
+ {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
+ {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
+ {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
+ {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
+ {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
+ {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
+ {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
+ {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
+ {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
+ {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
+ {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
+ {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
+ {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
+ {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
+ {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
+ {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
+ {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
+ {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
+ {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
+ {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
+ {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
+ {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
+ {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
+ {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
+ {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
+ {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
+ {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
+ {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
+ {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
+ {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
+ {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
+ {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
+ {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
+ {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
+ {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
+ {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
+ {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
+ {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
+ {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
+ {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
+ {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
+ {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
+ {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
+ {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
+ {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
+ {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
+ {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
+ {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
+ {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
+ {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
+ {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
+ {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
+ {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
+ {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
+ {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
+ {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
+ {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
+ {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
+ {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
+ {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
+ {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
+ {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
+ {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
+ {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
+ {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
+ {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
+ {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
+ {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
+ {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
+ {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
+ {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
+ {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
+ {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
+ {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
+ {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
+ {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
+ {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
+ {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
+ {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
+ {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
+ {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
+ {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
+ {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
+ {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
+ {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
+ {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
+ {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
+ {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
+ {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
+ {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
+ {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
+ {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
+ {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
+ {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
+ {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
+ {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
+ {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
+ {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
+ {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
+ {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
+ {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
+ {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
+ {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
+ {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
+ {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
+ {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
+ {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
+ {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
+ {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
+ {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
+ {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
+ {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
+ {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
+ {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
+ {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
+ {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
+ {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
+ {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
+ {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
+ {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
+ {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
+}
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/utils.go b/hack/tools/vendor/github.com/asaskevich/govalidator/utils.go
new file mode 100644
index 0000000000..a0b706a743
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/utils.go
@@ -0,0 +1,270 @@
+package govalidator
+
+import (
+ "errors"
+ "fmt"
+ "html"
+ "math"
+ "path"
+ "regexp"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Contains checks if the string contains the substring.
+func Contains(str, substring string) bool {
+ return strings.Contains(str, substring)
+}
+
+// Matches checks if the string matches the pattern (pattern is a regular expression).
+// In case of an error it returns false.
+func Matches(str, pattern string) bool {
+ match, _ := regexp.MatchString(pattern, str)
+ return match
+}
+
+// LeftTrim trims characters from the left side of the input.
+// If the second argument is empty, leading whitespace is removed.
+func LeftTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimLeftFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("^[" + chars + "]+")
+ return r.ReplaceAllString(str, "")
+}
+
+// RightTrim trims characters from the right side of the input.
+// If the second argument is empty, trailing whitespace is removed.
+func RightTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimRightFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("[" + chars + "]+$")
+ return r.ReplaceAllString(str, "")
+}
+
+// Trim trims characters from both sides of the input.
+// If the second argument is empty, surrounding whitespace is removed.
+func Trim(str, chars string) string {
+ return LeftTrim(RightTrim(str, chars), chars)
+}
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+ pattern := "[^" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+ pattern := "[" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value < 32, as well as 127, mostly control characters.
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+ chars := ""
+ if keepNewLines {
+ chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+ } else {
+ chars = "\x00-\x1F\x7F"
+ }
+ return BlackList(str, chars)
+}
+
+// ReplacePattern replaces all matches of the regular expression pattern in the string with replace.
+func ReplacePattern(str, pattern, replace string) string {
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+ if len(segment) == 0 {
+ return inrune
+ }
+ if len(inrune) != 0 {
+ inrune = append(inrune, '_')
+ }
+ inrune = append(inrune, segment...)
+ return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+ return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+ var output []rune
+ var segment []rune
+ for _, r := range str {
+
+ // don't treat numbers as a separate segment
+ if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
+ output = addSegment(output, segment)
+ segment = nil
+ }
+ segment = append(segment, unicode.ToLower(r))
+ }
+ output = addSegment(output, segment)
+ return string(output)
+}
+
+// Reverse returns the reversed string.
+func Reverse(s string) string {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r)
+}
+
+// GetLines splits the string by "\n" and returns a slice of lines.
+func GetLines(s string) []string {
+ return strings.Split(s, "\n")
+}
+
+// GetLine returns the specified line of a multiline string.
+func GetLine(s string, index int) (string, error) {
+ lines := GetLines(s)
+ if index < 0 || index >= len(lines) {
+ return "", errors.New("line index out of bounds")
+ }
+ return lines[index], nil
+}
+
+// RemoveTags removes all tags from an HTML string.
+func RemoveTags(s string) string {
+ return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns a safe string that can be used in file names.
+func SafeFileName(str string) string {
+ name := strings.ToLower(str)
+ name = path.Clean(path.Base(name))
+ name = strings.Trim(name, " ")
+ separators, err := regexp.Compile(`[ &_=+:]`)
+ if err == nil {
+ name = separators.ReplaceAllString(name, "-")
+ }
+ legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+ if err == nil {
+ name = legal.ReplaceAllString(name, "")
+ }
+ for strings.Contains(name, "--") {
+ name = strings.Replace(name, "--", "-", -1)
+ }
+ return name
+}
+
+// NormalizeEmail canonicalizes an email address: the local part and the
+// hostname are both lowercased. Normalization follows special rules for known
+// providers: currently, GMail addresses have dots removed from the local part,
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com),
+// and all @googlemail.com addresses are normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+ if !IsEmail(str) {
+ return "", fmt.Errorf("%s is not an email", str)
+ }
+ parts := strings.Split(str, "@")
+ parts[0] = strings.ToLower(parts[0])
+ parts[1] = strings.ToLower(parts[1])
+ if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+ parts[1] = "gmail.com"
+ parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+ }
+ return strings.Join(parts, "@"), nil
+}
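+
+// A short sketch of NormalizeEmail applying the rules above:
+//     normalized, _ := NormalizeEmail("Some.One+tag@GoogleMail.com")
+//     // normalized == "someone@gmail.com"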
+
+// Truncate truncates a string at the closest word boundary to the requested length, appending ending when it truncates.
+func Truncate(str string, length int, ending string) string {
+ var aftstr, befstr string
+ if len(str) > length {
+ words := strings.Fields(str)
+ before, present := 0, 0
+ for i := range words {
+ befstr = aftstr
+ before = present
+ aftstr = aftstr + words[i] + " "
+ present = len(aftstr)
+ if present > length && i != 0 {
+ if (length - before) < (present - length) {
+ return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ }
+ }
+
+ return str
+}
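+
+// A short sketch of Truncate cutting at the closest word boundary:
+//     Truncate("The quick brown fox jumps over the lazy dog", 21, "...")
+//     // -> "The quick brown fox..."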
+
+// PadLeft pads the left side of the string if its rune length is less than the indicated pad length
+func PadLeft(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads the right side of the string if its rune length is less than the indicated pad length
+func PadRight(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of the string if its rune length is less than the indicated pad length
+func PadBoth(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, true)
+}
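+
+// Short sketches of the padding helpers (rune-based lengths):
+//     PadLeft("hello", "*", 9)  // "****hello"
+//     PadRight("hello", "*", 9) // "hello****"
+//     PadBoth("hello", "*", 9)  // "**hello**"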
+
+// buildPadStr pads the string on the left, the right, or both sides; note that the
+// padding string may be unicode and more than one character
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+ // When the padded length is less than the current string size
+ if padLen < utf8.RuneCountInString(str) {
+ return str
+ }
+
+ padLen -= utf8.RuneCountInString(str)
+
+ targetLen := padLen
+
+ targetLenLeft := targetLen
+ targetLenRight := targetLen
+ if padLeft && padRight {
+ targetLenLeft = padLen / 2
+ targetLenRight = padLen - targetLenLeft
+ }
+
+ strToRepeatLen := utf8.RuneCountInString(padStr)
+
+ repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+ repeatedString := strings.Repeat(padStr, repeatTimes)
+
+ leftSide := ""
+ if padLeft {
+ leftSide = repeatedString[0:targetLenLeft]
+ }
+
+ rightSide := ""
+ if padRight {
+ rightSide = repeatedString[0:targetLenRight]
+ }
+
+ return leftSide + str + rightSide
+}
+
+// TruncatingErrorf drops extra args from fmt.Errorf when str contains fewer %s verbs than args
+func TruncatingErrorf(str string, args ...interface{}) error {
+ n := strings.Count(str, "%s")
+ return fmt.Errorf(str, args[:n]...)
+}
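+
+// A short sketch of TruncatingErrorf; note that only %s verbs are counted:
+//     err := TruncatingErrorf("field %s failed", "Name", "extra-arg")
+//     // err.Error() == "field Name failed"; "extra-arg" is dropped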
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/validator.go b/hack/tools/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 0000000000..b18bbcb4c9
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,1278 @@
+// Package govalidator is package of validators and sanitizers for strings, structs and collections.
+package govalidator
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ fieldsRequiredByDefault bool
+ nilPtrAllowedByRequired = false
+ notNumberRegexp = regexp.MustCompile("[^0-9]+")
+ whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
+ paramsRegexp = regexp.MustCompile(`\(.*\)$`)
+)
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
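+
+// RF3339WithoutZone is the RFC3339 time layout without the timezone part.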
+const RF3339WithoutZone = "2006-01-02T15:04:05"
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+//     type exampleStruct struct {
+//         Name  string ``
+//         Email string `valid:"email"`
+//     }
+// This, however, will only fail when Email is empty or an invalid email address:
+//     type exampleStruct2 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email"`
+//     }
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+//     type exampleStruct2 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email,optional"`
+//     }
+func SetFieldsRequiredByDefault(value bool) {
+ fieldsRequiredByDefault = value
+}
+
+// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
+// The validation will still reject ptr fields in their zero value state. Example with this enabled:
+//     type exampleStruct struct {
+//         Name *string `valid:"required"`
+//     }
+// With `Name` set to "", this will be considered invalid input and will cause a validation error.
+// With `Name` set to nil, this will be considered valid by validation.
+// By default this is disabled.
+func SetNilPtrAllowedByRequired(value bool) {
+ nilPtrAllowedByRequired = value
+}
+
+// IsEmail checks if the string is an email.
+func IsEmail(str string) bool {
+ // TODO uppercase letters are not supported
+ return rxEmail.MatchString(str)
+}
+
+// IsExistingEmail checks if the string is an email address with an existing domain
+func IsExistingEmail(email string) bool {
+
+ if len(email) < 6 || len(email) > 254 {
+ return false
+ }
+ at := strings.LastIndex(email, "@")
+ if at <= 0 || at > len(email)-3 {
+ return false
+ }
+ user := email[:at]
+ host := email[at+1:]
+ if len(user) > 64 {
+ return false
+ }
+ if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+ return false
+ }
+ switch host {
+ case "localhost", "example.com":
+ return true
+ }
+ if _, err := net.LookupMX(host); err != nil {
+ if _, err := net.LookupIP(host); err != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsURL checks if the string is a URL.
+func IsURL(str string) bool {
+ if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+ return false
+ }
+ strTemp := str
+ if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+ // support URLs with no scheme indicated but with a colon for the port number
+ // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+ strTemp = "http://" + str
+ }
+ u, err := url.Parse(strTemp)
+ if err != nil {
+ return false
+ }
+ if strings.HasPrefix(u.Host, ".") {
+ return false
+ }
+ if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+ return false
+ }
+ return rxURL.MatchString(str)
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986
+func IsRequestURL(rawurl string) bool {
+ url, err := url.ParseRequestURI(rawurl)
+ if err != nil {
+ return false //Couldn't even parse the rawurl
+ }
+ if len(url.Scheme) == 0 {
+ return false //No Scheme found
+ }
+ return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+ _, err := url.ParseRequestURI(rawurl)
+ return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+
+ for _, c := range str {
+ if !unicode.IsLetter(c) {
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ for _, c := range str {
+ if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsNumeric checks if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsNumber(c) { //numbers && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+func IsUTFDigit(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsDigit(c) { //digits && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsHexadecimal checks if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+ return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor checks if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+ return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor checks if the string is a valid RGB color in the form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+ return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase checks if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToLower(str)
+}
+
+// IsUpperCase checks if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToUpper(str)
+}
+
+// HasLowerCase checks if the string contains at least one lowercase letter. Empty string is valid.
+func HasLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasLowerCase.MatchString(str)
+}
+
+// HasUpperCase checks if the string contains at least one uppercase letter. Empty string is valid.
+func HasUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasUpperCase.MatchString(str)
+}
+
+// IsInt checks if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+ return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that is divisible by another.
+// If the second argument is not a valid integer, or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer or is zero, it returns true (an invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+ f, _ := ToFloat(str)
+ p := int64(f)
+ q, _ := ToInt(num)
+ if q == 0 {
+ return false
+ }
+ return (p == 0) || (p%q == 0)
+}
+
+// IsNull checks if the string is null (i.e. empty).
+func IsNull(str string) bool {
+ return len(str) == 0
+}
+
+// HasWhitespaceOnly checks if a non-empty string contains only whitespace
+func HasWhitespaceOnly(str string) bool {
+ return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace
+func HasWhitespace(str string) bool {
+ return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+ return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+ return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+ return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+ return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+ return rxUUID.MatchString(str)
+}
+
+// IsCreditCard checks if the string is a credit card number (validated with the Luhn checksum).
+func IsCreditCard(str string) bool {
+ sanitized := notNumberRegexp.ReplaceAllString(str, "")
+ if !rxCreditCard.MatchString(sanitized) {
+ return false
+ }
+ var sum int64
+ var digit string
+ var tmpNum int64
+ var shouldDouble bool
+ for i := len(sanitized) - 1; i >= 0; i-- {
+ digit = sanitized[i:(i + 1)]
+ tmpNum, _ = ToInt(digit)
+ if shouldDouble {
+ tmpNum *= 2
+ if tmpNum >= 10 {
+ sum += ((tmpNum % 10) + 1)
+ } else {
+ sum += tmpNum
+ }
+ } else {
+ sum += tmpNum
+ }
+ shouldDouble = !shouldDouble
+ }
+
+ return sum%10 == 0
+}
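+
+// The loop above is the Luhn checksum. A short sketch:
+//     IsCreditCard("4111 1111 1111 1111") // true  (Luhn-valid test number)
+//     IsCreditCard("4111 1111 1111 1112") // false (checksum mismatch)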
+
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+ return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+ return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, both variants are checked.
+func IsISBN(str string, version int) bool {
+ sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ var checksum int32
+ var i int32
+ if version == 10 {
+ if !rxISBN10.MatchString(sanitized) {
+ return false
+ }
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(sanitized[i]-'0')
+ }
+ if sanitized[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(sanitized[9]-'0')
+ }
+ if checksum%11 == 0 {
+ return true
+ }
+ return false
+ } else if version == 13 {
+ if !rxISBN13.MatchString(sanitized) {
+ return false
+ }
+ factor := []int32{1, 3}
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(sanitized[i]-'0')
+ }
+ return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
+ }
+ return IsISBN(str, 10) || IsISBN(str, 13)
+}
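+
+// Short sketches of the ISBN validators:
+//     IsISBN10("0-306-40615-2")     // true, checksum divisible by 11
+//     IsISBN13("978-3-16-148410-0") // true, weighted checksum matches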
+
+// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+ var js json.RawMessage
+ return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxMultibyte.MatchString(str)
+}
+
+// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth checks if the string contains a mixture of full- and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 checks if a string is base64-encoded.
+func IsBase64(str string) bool {
+ return rxBase64.MatchString(str)
+}
+
+// IsFilePath checks if a string is a Windows or Unix file path and returns its type.
+func IsFilePath(str string) (bool, int) {
+ if rxWinPath.MatchString(str) {
+ // check the Windows path limit, see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false, Win
+ }
+ return true, Win
+ } else if rxUnixPath.MatchString(str) {
+ return true, Unix
+ }
+ return false, Unknown
+}
+
+// IsDataURI checks if a string is a base64-encoded data URI, such as an image
+func IsDataURI(str string) bool {
+ dataURI := strings.Split(str, ",")
+ if !rxDataURI.MatchString(dataURI[0]) {
+ return false
+ }
+ return IsBase64(dataURI[1])
+}
+
+// IsISO3166Alpha2 checks if a string is a valid two-letter country code
+func IsISO3166Alpha2(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO3166Alpha3 checks if a string is a valid three-letter country code
+func IsISO3166Alpha3(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha3Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha2 checks if a string is a valid two-letter language code
+func IsISO693Alpha2(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha3b checks if a string is a valid three-letter language code
+func IsISO693Alpha3b(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha3bCode {
+ return true
+ }
+ }
+ return false
+}
+
+// IsDNSName will validate the given string as a DNS name
+func IsDNSName(str string) bool {
+ if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
+ // constraints already violated
+ return false
+ }
+ return !IsIP(str) && rxDNSName.MatchString(str)
+}
+
+// IsHash checks if a string is a hash of type algorithm.
+// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
+func IsHash(str string, algorithm string) bool {
+ len := "0"
+ algo := strings.ToLower(algorithm)
+
+ if algo == "crc32" || algo == "crc32b" {
+ len = "8"
+ } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
+ len = "32"
+ } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
+ len = "40"
+ } else if algo == "tiger192" {
+ len = "48"
+ } else if algo == "sha256" {
+ len = "64"
+ } else if algo == "sha384" {
+ len = "96"
+ } else if algo == "sha512" {
+ len = "128"
+ } else {
+ return false
+ }
+
+ return Matches(str, "^[a-f0-9]{"+len+"}$")
+}
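+
+// A short sketch of IsHash; only the hex length per algorithm is checked:
+//     IsHash("d41d8cd98f00b204e9800998ecf8427e", "md5")  // true  (32 hex chars)
+//     IsHash("d41d8cd98f00b204e9800998ecf8427e", "sha1") // false (40 expected)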
+
+// IsDialString validates the given string for usage with the various Dial() functions
+func IsDialString(str string) bool {
+
+ if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+ return true
+ }
+
+ return false
+}
+
+// IsIP checks if a string is either IP version 4 or 6.
+func IsIP(str string) bool {
+ return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port
+func IsPort(str string) bool {
+ if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+ return true
+ }
+ return false
+}
+
+// IsIPv4 checks if the string is an IP version 4 address.
+func IsIPv4(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ".")
+}
+
+// IsIPv6 checks if the string is an IP version 6 address.
+func IsIPv6(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
+func IsCIDR(str string) bool {
+ _, _, err := net.ParseCIDR(str)
+ return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+ _, err := net.ParseMAC(str)
+ return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+ return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+ return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+ return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+ return rxLongitude.MatchString(str)
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided bit length
+func IsRsaPublicKey(str string, keylen int) bool {
+ bb := bytes.NewBufferString(str)
+ pemBytes, err := ioutil.ReadAll(bb)
+ if err != nil {
+ return false
+ }
+ block, _ := pem.Decode(pemBytes)
+ if block != nil && block.Type != "PUBLIC KEY" {
+ return false
+ }
+ var der []byte
+
+ if block != nil {
+ der = block.Bytes
+ } else {
+ der, err = base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return false
+ }
+ }
+
+ key, err := x509.ParsePKIXPublicKey(der)
+ if err != nil {
+ return false
+ }
+ pubkey, ok := key.(*rsa.PublicKey)
+ if !ok {
+ return false
+ }
+ bitlen := len(pubkey.N.Bytes()) * 8
+ return bitlen == keylen
+}
+
+func toJSONName(tag string) string {
+ if tag == "" {
+ return ""
+ }
+
+ // The JSON name always comes first. If there are no options, split[0] is
+ // the JSON name; if the JSON name is not set, split[0] is an empty string.
+ split := strings.SplitN(tag, ",", 2)
+
+ name := split[0]
+
+ // However, the field may be skipped when (de-)serializing to/from JSON,
+ // in which case assume that there is no tag name to use
+ if name == "-" {
+ return ""
+ }
+ return name
+}
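+
+// For example (illustrative tag values):
+//
+//    toJSONName("name,omitempty") // "name"
+//    toJSONName("-")              // "" (field is skipped in JSON)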
+
+func PrependPathToErrors(err error, path string) error {
+ switch err2 := err.(type) {
+ case Error:
+ err2.Path = append([]string{path}, err2.Path...)
+ return err2
+ case Errors:
+ errors := err2.Errors()
+ for i, err3 := range errors {
+ errors[i] = PrependPathToErrors(err3, path)
+ }
+ return err2
+ }
+ fmt.Println(err)
+ return err
+}
+
+// ValidateStruct validates struct fields based on their tags.
+// The result will be `false` if there are any errors.
+func ValidateStruct(s interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ val := reflect.ValueOf(s)
+ if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ // we only accept structs
+ if val.Kind() != reflect.Struct {
+ return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+ }
+ var errs Errors
+ for i := 0; i < val.NumField(); i++ {
+ valueField := val.Field(i)
+ typeField := val.Type().Field(i)
+ if typeField.PkgPath != "" {
+ continue // Private field
+ }
+ structResult := true
+ if valueField.Kind() == reflect.Interface {
+ valueField = valueField.Elem()
+ }
+ if (valueField.Kind() == reflect.Struct ||
+ (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+ typeField.Tag.Get(tagName) != "-" {
+ var err error
+ structResult, err = ValidateStruct(valueField.Interface())
+ if err != nil {
+ err = PrependPathToErrors(err, typeField.Name)
+ errs = append(errs, err)
+ }
+ }
+ resultField, err2 := typeCheck(valueField, typeField, val, nil)
+ if err2 != nil {
+
+ // Replace structure name with JSON name if there is a tag on the variable
+ jsonTag := toJSONName(typeField.Tag.Get("json"))
+ if jsonTag != "" {
+ switch jsonError := err2.(type) {
+ case Error:
+ jsonError.Name = jsonTag
+ err2 = jsonError
+ case Errors:
+ for i2, err3 := range jsonError {
+ switch customErr := err3.(type) {
+ case Error:
+ customErr.Name = jsonTag
+ jsonError[i2] = customErr
+ }
+ }
+
+ err2 = jsonError
+ }
+ }
+
+ errs = append(errs, err2)
+ }
+ result = result && resultField && structResult
+ }
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result, err
+}
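+
+// A minimal usage sketch; the struct and tag value are illustrative:
+//
+//    type Contact struct {
+//        Email string `valid:"email"`
+//    }
+//    ok, err := ValidateStruct(Contact{Email: "not-an-email"})
+//    // ok == false; err reports that Email does not validate as email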
+
+// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into a
+// tagOptionsMap such as {"required": "Some error message", "length(2|3)": ""}
+func parseTagIntoMap(tag string) tagOptionsMap {
+ optionsMap := make(tagOptionsMap)
+ options := strings.Split(tag, ",")
+
+ for i, option := range options {
+ option = strings.TrimSpace(option)
+
+ validationOptions := strings.Split(option, "~")
+ if !isValidTag(validationOptions[0]) {
+ continue
+ }
+ if len(validationOptions) == 2 {
+ optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
+ } else {
+ optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
+ }
+ }
+ return optionsMap
+}
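+
+// For example, the tag below yields two options, one with a custom error
+// message and one without:
+//
+//    parseTagIntoMap("required~Some error message,length(2|3)")
+//    // -> {"required": "Some error message", "length(2|3)": ""}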
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// IsSSN will validate the given string as a U.S. Social Security Number
+func IsSSN(str string) bool {
+ if str == "" || len(str) != 11 {
+ return false
+ }
+ return rxSSN.MatchString(str)
+}
+
+// IsSemver checks if the string is a valid semantic version
+func IsSemver(str string) bool {
+ return rxSemver.MatchString(str)
+}
+
+// IsTime checks if the string is a valid time according to the given format
+func IsTime(str string, format string) bool {
+ _, err := time.Parse(format, str)
+ return err == nil
+}
+
+// IsRFC3339 checks if the string is a valid RFC 3339 timestamp
+func IsRFC3339(str string) bool {
+ return IsTime(str, time.RFC3339)
+}
+
+// IsRFC3339WithoutZone checks if the string is a valid RFC 3339 timestamp without the timezone part.
+func IsRFC3339WithoutZone(str string) bool {
+ return IsTime(str, RF3339WithoutZone)
+}
+
+// IsISO4217 checks if the string is a valid ISO 4217 currency code
+func IsISO4217(str string) bool {
+ for _, currency := range ISO4217List {
+ if str == currency {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ByteLength checks the string's length in bytes
+func ByteLength(str string, params ...string) bool {
+ if len(params) == 2 {
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return len(str) >= int(min) && len(str) <= int(max)
+ }
+
+ return false
+}
+
+// RuneLength checks the string's length in runes
+// Alias for StringLength
+func RuneLength(str string, params ...string) bool {
+ return StringLength(str, params...)
+}
+
+// IsRsaPub checks whether the string is a valid RSA key with the given bit length
+// Alias for IsRsaPublicKey
+func IsRsaPub(str string, params ...string) bool {
+ if len(params) == 1 {
+ n, _ := ToInt(params[0])
+ return IsRsaPublicKey(str, int(n))
+ }
+
+ return false
+}
+
+// StringMatches checks if a string matches a given pattern.
+func StringMatches(s string, params ...string) bool {
+ if len(params) == 1 {
+ pattern := params[0]
+ return Matches(s, pattern)
+ }
+ return false
+}
+
+// StringLength checks the string's length in runes (multi-byte strings included)
+func StringLength(str string, params ...string) bool {
+
+ if len(params) == 2 {
+ strLength := utf8.RuneCountInString(str)
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return strLength >= int(min) && strLength <= int(max)
+ }
+
+ return false
+}
+
+// Range checks whether the string, parsed as a float, lies within the given range
+func Range(str string, params ...string) bool {
+ if len(params) == 2 {
+ value, _ := ToFloat(str)
+ min, _ := ToFloat(params[0])
+ max, _ := ToFloat(params[1])
+ return InRange(value, min, max)
+ }
+
+ return false
+}
+
+func isInRaw(str string, params ...string) bool {
+ if len(params) == 1 {
+ rawParams := params[0]
+
+ parsedParams := strings.Split(rawParams, "|")
+
+ return IsIn(str, parsedParams...)
+ }
+
+ return false
+}
+
+// IsIn checks if the string str is a member of the set of strings params
+func IsIn(str string, params ...string) bool {
+ for _, param := range params {
+ if str == param {
+ return true
+ }
+ }
+
+ return false
+}
+
+func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
+ if nilPtrAllowedByRequired {
+ k := v.Kind()
+ if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
+ return true, nil
+ }
+ }
+
+ if requiredOption, isRequired := options["required"]; isRequired {
+ if len(requiredOption.customErrorMessage) > 0 {
+ return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
+ } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
+ return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
+ }
+ // not required and empty is valid
+ return true, nil
+}
+
+func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) {
+ if !v.IsValid() {
+ return false, nil
+ }
+
+ tag := t.Tag.Get(tagName)
+
+ // Check if the field should be ignored
+ switch tag {
+ case "":
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
+ if !fieldsRequiredByDefault {
+ return true, nil
+ }
+ return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
+ }
+ case "-":
+ return true, nil
+ }
+
+ isRootType := false
+ if options == nil {
+ isRootType = true
+ options = parseTagIntoMap(tag)
+ }
+
+ if isEmptyValue(v) {
+ // an empty value is not validated, check only required
+ isValid, resultErr = checkRequired(v, t, options)
+ for key := range options {
+ delete(options, key)
+ }
+ return isValid, resultErr
+ }
+
+ var customTypeErrors Errors
+ optionsOrder := options.orderedKeys()
+ for _, validatorName := range optionsOrder {
+ validatorStruct := options[validatorName]
+ if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
+ delete(options, validatorName)
+
+ if result := validatefunc(v.Interface(), o.Interface()); !result {
+ if len(validatorStruct.customErrorMessage) > 0 {
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
+ continue
+ }
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
+ }
+ }
+ }
+
+ if len(customTypeErrors.Errors()) > 0 {
+ return false, customTypeErrors
+ }
+
+ if isRootType {
+ // Ensure that the value has been checked by all specified validators before reporting that it is valid
+ defer func() {
+ delete(options, "optional")
+ delete(options, "required")
+
+ if isValid && resultErr == nil && len(options) != 0 {
+ optionsOrder := options.orderedKeys()
+ for _, validator := range optionsOrder {
+ isValid = false
+ resultErr = Error{t.Name, fmt.Errorf(
+ "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
+ return
+ }
+ }
+ }()
+ }
+
+ switch v.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ // for each tag option check the map of validator functions
+ for _, validatorSpec := range optionsOrder {
+ validatorStruct := options[validatorSpec]
+ var negate bool
+ validator := validatorSpec
+ customMsgExists := len(validatorStruct.customErrorMessage) > 0
+
+ // Check whether the tag looks like '!something' or 'something'
+ if validator[0] == '!' {
+ validator = validator[1:]
+ negate = true
+ }
+
+ // Check for param validators
+ for key, value := range ParamTagRegexMap {
+ ps := value.FindStringSubmatch(validator)
+ if len(ps) == 0 {
+ continue
+ }
+
+ validatefunc, ok := ParamTagMap[key]
+ if !ok {
+ continue
+ }
+
+ delete(options, validatorSpec)
+
+ switch v.Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ default:
+ // type not yet supported, fail
+ return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}}
+ }
+ }
+
+ if validatefunc, ok := TagMap[validator]; ok {
+ delete(options, validatorSpec)
+
+ switch v.Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field); !result && !negate || result && negate {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ default:
+ // type not yet supported, fail
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+ return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
+ }
+ }
+ }
+ return true, nil
+ case reflect.Map:
+ if v.Type().Key().Kind() != reflect.String {
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+ sv := stringValues(v.MapKeys())
+ sort.Sort(sv)
+ result := true
+ for i, k := range sv {
+ var resultItem bool
+ var err error
+ if v.MapIndex(k).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
+ if err != nil {
+ err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Slice, reflect.Array:
+ result := true
+ for i := 0; i < v.Len(); i++ {
+ var resultItem bool
+ var err error
+ if v.Index(i).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.Index(i), t, o, options)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.Index(i).Interface())
+ if err != nil {
+ err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Interface:
+ // If the value is an interface then validate its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return ValidateStruct(v.Interface())
+ case reflect.Ptr:
+ // If the value is a pointer then check its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return typeCheck(v.Elem(), t, o, options)
+ case reflect.Struct:
+ return ValidateStruct(v.Interface())
+ default:
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+}
+
+func stripParams(validatorString string) string {
+ return paramsRegexp.ReplaceAllString(validatorString, "")
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String, reflect.Array:
+ return v.Len() == 0
+ case reflect.Map, reflect.Slice:
+ return v.Len() == 0 || v.IsNil()
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+
+ return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+}
+
+// ErrorByField returns the error for the specified field of a struct
+// validated by ValidateStruct, or an empty string if there are no errors
+// or the field doesn't exist or doesn't have any errors.
+func ErrorByField(e error, field string) string {
+ if e == nil {
+ return ""
+ }
+ return ErrorsByField(e)[field]
+}
+
+// ErrorsByField returns a map of errors for a struct validated
+// by ValidateStruct, or an empty map if there are no errors.
+func ErrorsByField(e error) map[string]string {
+ m := make(map[string]string)
+ if e == nil {
+ return m
+ }
+
+ switch e := e.(type) {
+ case Error:
+ m[e.Name] = e.Err.Error()
+ case Errors:
+ for _, item := range e.Errors() {
+ n := ErrorsByField(item)
+ for k, v := range n {
+ m[k] = v
+ }
+ }
+ }
+
+ return m
+}
+
+// Error returns string equivalent for reflect.Type
+func (e *UnsupportedTypeError) Error() string {
+ return "validator: unsupported type: " + e.Type.String()
+}
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/wercker.yml b/hack/tools/vendor/github.com/asaskevich/govalidator/wercker.yml
new file mode 100644
index 0000000000..cac7a5fcf0
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/wercker.yml
@@ -0,0 +1,15 @@
+box: golang
+build:
+ steps:
+ - setup-go-workspace
+
+ - script:
+ name: go get
+ code: |
+ go version
+ go get -t ./...
+
+ - script:
+ name: go test
+ code: |
+ go test -race ./...
diff --git a/hack/tools/vendor/github.com/blang/semver/v4/LICENSE b/hack/tools/vendor/github.com/blang/semver/v4/LICENSE
new file mode 100644
index 0000000000..5ba5c86fcb
--- /dev/null
+++ b/hack/tools/vendor/github.com/blang/semver/v4/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (c) 2014 Benedikt Lang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/hack/tools/vendor/github.com/blang/semver/v4/json.go b/hack/tools/vendor/github.com/blang/semver/v4/json.go
new file mode 100644
index 0000000000..a74bf7c449
--- /dev/null
+++ b/hack/tools/vendor/github.com/blang/semver/v4/json.go
@@ -0,0 +1,23 @@
+package semver
+
+import (
+ "encoding/json"
+)
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(data []byte) (err error) {
+ var versionString string
+
+ if err = json.Unmarshal(data, &versionString); err != nil {
+ return
+ }
+
+ *v, err = Parse(versionString)
+
+ return
+}
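+
+// Illustrative round trip:
+//
+//    b, _ := json.Marshal(MustParse("1.2.3")) // `"1.2.3"`
+//    var v Version
+//    _ = json.Unmarshal(b, &v)                // v equals 1.2.3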
diff --git a/hack/tools/vendor/github.com/blang/semver/v4/range.go b/hack/tools/vendor/github.com/blang/semver/v4/range.go
new file mode 100644
index 0000000000..95f7139b97
--- /dev/null
+++ b/hack/tools/vendor/github.com/blang/semver/v4/range.go
@@ -0,0 +1,416 @@
+package semver
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type wildcardType int
+
+const (
+ noneWildcard wildcardType = iota
+ majorWildcard wildcardType = 1
+ minorWildcard wildcardType = 2
+ patchWildcard wildcardType = 3
+)
+
+func wildcardTypefromInt(i int) wildcardType {
+ switch i {
+ case 1:
+ return majorWildcard
+ case 2:
+ return minorWildcard
+ case 3:
+ return patchWildcard
+ default:
+ return noneWildcard
+ }
+}
+
+type comparator func(Version, Version) bool
+
+var (
+ compEQ comparator = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == 0
+ }
+ compNE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) != 0
+ }
+ compGT = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == 1
+ }
+ compGE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) >= 0
+ }
+ compLT = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == -1
+ }
+ compLE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) <= 0
+ }
+)
+
+type versionRange struct {
+ v Version
+ c comparator
+}
+
+// rangeFunc creates a Range from the given versionRange.
+func (vr *versionRange) rangeFunc() Range {
+ return Range(func(v Version) bool {
+ return vr.c(v, vr.v)
+ })
+}
+
+// Range represents a range of versions.
+// A Range can be used to check if a Version satisfies it:
+//
+// r, err := semver.ParseRange(">1.0.0 <2.0.0")
+// r(semver.MustParse("1.1.1")) // returns true
+type Range func(Version) bool
+
+// OR combines the existing Range with another Range using logical OR.
+func (rf Range) OR(f Range) Range {
+ return Range(func(v Version) bool {
+ return rf(v) || f(v)
+ })
+}
+
+// AND combines the existing Range with another Range using logical AND.
+func (rf Range) AND(f Range) Range {
+ return Range(func(v Version) bool {
+ return rf(v) && f(v)
+ })
+}
+
+// ParseRange parses a range and returns a Range.
+// If the range could not be parsed an error is returned.
+//
+// Valid ranges are:
+// - "<1.0.0"
+// - "<=1.0.0"
+// - ">1.0.0"
+// - ">=1.0.0"
+// - "1.0.0", "=1.0.0", "==1.0.0"
+// - "!1.0.0", "!=1.0.0"
+//
+// A Range can consist of multiple ranges separated by space:
+// Ranges can be linked by logical AND:
+// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
+// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
+//
+// Ranges can also be linked by logical OR:
+// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
+//
+// AND has a higher precedence than OR. It's not possible to use brackets.
+//
+// Ranges can be combined by both AND and OR
+//
+// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+func ParseRange(s string) (Range, error) {
+ parts := splitAndTrim(s)
+ orParts, err := splitORParts(parts)
+ if err != nil {
+ return nil, err
+ }
+ expandedParts, err := expandWildcardVersion(orParts)
+ if err != nil {
+ return nil, err
+ }
+ var orFn Range
+ for _, p := range expandedParts {
+ var andFn Range
+ for _, ap := range p {
+ opStr, vStr, err := splitComparatorVersion(ap)
+ if err != nil {
+ return nil, err
+ }
+ vr, err := buildVersionRange(opStr, vStr)
+ if err != nil {
+ return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
+ }
+ rf := vr.rangeFunc()
+
+ // Set function
+ if andFn == nil {
+ andFn = rf
+ } else { // Combine with existing function
+ andFn = andFn.AND(rf)
+ }
+ }
+ if orFn == nil {
+ orFn = andFn
+ } else {
+ orFn = orFn.OR(andFn)
+ }
+
+ }
+ return orFn, nil
+}
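+
+// Illustrative usage:
+//
+//    r, err := ParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
+//    if err == nil {
+//        r(MustParse("1.5.0")) // true
+//        r(MustParse("2.1.0")) // false
+//    }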
+
+// splitORParts splits the already cleaned parts by '||'.
+// Checks for invalid positions of the operator and returns an
+// error if found.
+func splitORParts(parts []string) ([][]string, error) {
+ var ORparts [][]string
+ last := 0
+ for i, p := range parts {
+ if p == "||" {
+ if i == 0 {
+ return nil, fmt.Errorf("First element in range is '||'")
+ }
+ ORparts = append(ORparts, parts[last:i])
+ last = i + 1
+ }
+ }
+ if last == len(parts) {
+ return nil, fmt.Errorf("Last element in range is '||'")
+ }
+ ORparts = append(ORparts, parts[last:])
+ return ORparts, nil
+}
+
+// buildVersionRange takes an operator and a version string
+// and builds a versionRange, or returns an error.
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+ c := parseComparator(opStr)
+ if c == nil {
+ return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+ }
+ v, err := Parse(vStr)
+ if err != nil {
+ return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+ }
+
+ return &versionRange{
+ v: v,
+ c: c,
+ }, nil
+
+}
+
+// inArray checks if a byte is contained in a slice of bytes
+func inArray(s byte, list []byte) bool {
+ for _, el := range list {
+ if el == s {
+ return true
+ }
+ }
+ return false
+}
+
+// splitAndTrim splits a range string by spaces and strips whitespace
+func splitAndTrim(s string) (result []string) {
+ last := 0
+ var lastChar byte
+ excludeFromSplit := []byte{'>', '<', '='}
+ for i := 0; i < len(s); i++ {
+ if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+ if last < i-1 {
+ result = append(result, s[last:i])
+ }
+ last = i + 1
+ } else if s[i] != ' ' {
+ lastChar = s[i]
+ }
+ }
+ if last < len(s)-1 {
+ result = append(result, s[last:])
+ }
+
+ for i, v := range result {
+ result[i] = strings.Replace(v, " ", "", -1)
+ }
+
+ return
+}
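+
+// For example, spaces after a comparator do not split the part:
+//
+//    splitAndTrim(">= 1.0.0 < 2.0.0") // []string{">=1.0.0", "<2.0.0"}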
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+ i := strings.IndexFunc(s, unicode.IsDigit)
+ if i == -1 {
+ return "", "", fmt.Errorf("Could not get version from string: %q", s)
+ }
+ return strings.TrimSpace(s[0:i]), s[i:], nil
+}
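+
+// For example:
+//
+//    splitComparatorVersion(">=1.2.3") // ">=", "1.2.3", nil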
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+ parts := strings.Split(vStr, ".")
+ nparts := len(parts)
+ wildcard := parts[nparts-1]
+
+ possibleWildcardType := wildcardTypefromInt(nparts)
+ if wildcard == "x" {
+ return possibleWildcardType
+ }
+
+ return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+ // handle 1.x.x
+ vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+ vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+ parts := strings.Split(vStr2, ".")
+
+ // handle 1.x
+ if len(parts) == 2 {
+ return vStr2 + ".0"
+ }
+
+ return vStr2
+}
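+
+// For example:
+//
+//    createVersionFromWildcard("1.2.x") // "1.2.0"
+//    createVersionFromWildcard("1.x")   // "1.0.0"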
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return "", err
+ }
+ parts[0] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return "", err
+ }
+ parts[1] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x will become >= 1.2.0
+// <= 1.2.x will become < 1.3.0
+// > 1.2.x will become >= 1.3.0
+// < 1.2.x will become < 1.2.0
+// != 1.2.x will become < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x will become >= 1.0.0
+// <= 1.x will become < 2.0.0
+// > 1.x will become >= 2.0.0
+// < 1.x will become < 1.0.0
+// != 1.x will become < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x will become >= 1.2.0 < 1.3.0
+// 1.x will become >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+ var expandedParts [][]string
+ for _, p := range parts {
+ var newParts []string
+ for _, ap := range p {
+ if strings.Contains(ap, "x") {
+ opStr, vStr, err := splitComparatorVersion(ap)
+ if err != nil {
+ return nil, err
+ }
+
+ versionWildcardType := getWildcardType(vStr)
+ flatVersion := createVersionFromWildcard(vStr)
+
+ var resultOperator string
+ var shouldIncrementVersion bool
+ switch opStr {
+ case ">":
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ case ">=":
+ resultOperator = ">="
+ case "<":
+ resultOperator = "<"
+ case "<=":
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "", "=", "==":
+ newParts = append(newParts, ">="+flatVersion)
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "!=", "!":
+ newParts = append(newParts, "<"+flatVersion)
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ }
+
+ var resultVersion string
+ if shouldIncrementVersion {
+ switch versionWildcardType {
+ case patchWildcard:
+ resultVersion, _ = incrementMinorVersion(flatVersion)
+ case minorWildcard:
+ resultVersion, _ = incrementMajorVersion(flatVersion)
+ }
+ } else {
+ resultVersion = flatVersion
+ }
+
+ ap = resultOperator + resultVersion
+ }
+ newParts = append(newParts, ap)
+ }
+ expandedParts = append(expandedParts, newParts)
+ }
+
+ return expandedParts, nil
+}
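+
+// For example, a bare patch wildcard expands into a bounded AND pair:
+//
+//    expandWildcardVersion([][]string{{"1.2.x"}})
+//    // -> [][]string{{">=1.2.0", "<1.3.0"}}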
+
+func parseComparator(s string) comparator {
+ switch s {
+ case "==":
+ fallthrough
+ case "":
+ fallthrough
+ case "=":
+ return compEQ
+ case ">":
+ return compGT
+ case ">=":
+ return compGE
+ case "<":
+ return compLT
+ case "<=":
+ return compLE
+ case "!":
+ fallthrough
+ case "!=":
+ return compNE
+ }
+
+ return nil
+}
+
+// MustParseRange is like ParseRange but panics if the range cannot be parsed.
+func MustParseRange(s string) Range {
+ r, err := ParseRange(s)
+ if err != nil {
+ panic(`semver: ParseRange(` + s + `): ` + err.Error())
+ }
+ return r
+}
diff --git a/hack/tools/vendor/github.com/blang/semver/v4/semver.go b/hack/tools/vendor/github.com/blang/semver/v4/semver.go
new file mode 100644
index 0000000000..307de610f9
--- /dev/null
+++ b/hack/tools/vendor/github.com/blang/semver/v4/semver.go
@@ -0,0 +1,476 @@
+package semver
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ numbers string = "0123456789"
+ alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+ alphanum = alphas + numbers
+)
+
+// SpecVersion is the latest fully supported spec version of semver
+var SpecVersion = Version{
+ Major: 2,
+ Minor: 0,
+ Patch: 0,
+}
+
+// Version represents a semver compatible version
+type Version struct {
+ Major uint64
+ Minor uint64
+ Patch uint64
+ Pre []PRVersion
+ Build []string //No Precedence
+}
+
+// String returns the string representation of the Version
+func (v Version) String() string {
+ b := make([]byte, 0, 5)
+ b = strconv.AppendUint(b, v.Major, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Minor, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Patch, 10)
+
+ if len(v.Pre) > 0 {
+ b = append(b, '-')
+ b = append(b, v.Pre[0].String()...)
+
+ for _, pre := range v.Pre[1:] {
+ b = append(b, '.')
+ b = append(b, pre.String()...)
+ }
+ }
+
+ if len(v.Build) > 0 {
+ b = append(b, '+')
+ b = append(b, v.Build[0]...)
+
+ for _, build := range v.Build[1:] {
+ b = append(b, '.')
+ b = append(b, build...)
+ }
+ }
+
+ return string(b)
+}
+
+// FinalizeVersion discards prerelease and build number and only returns
+// major, minor and patch number.
+func (v Version) FinalizeVersion() string {
+ b := make([]byte, 0, 5)
+ b = strconv.AppendUint(b, v.Major, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Minor, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Patch, 10)
+ return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+ return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+ return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+ return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+ if v.Major != o.Major {
+ if v.Major > o.Major {
+ return 1
+ }
+ return -1
+ }
+ if v.Minor != o.Minor {
+ if v.Minor > o.Minor {
+ return 1
+ }
+ return -1
+ }
+ if v.Patch != o.Patch {
+ if v.Patch > o.Patch {
+ return 1
+ }
+ return -1
+ }
+
+ // Quick comparison if a version has no prerelease versions
+ if len(v.Pre) == 0 && len(o.Pre) == 0 {
+ return 0
+ } else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+ return 1
+ } else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+ return -1
+ }
+
+ i := 0
+ for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+ if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+ continue
+ } else if comp == 1 {
+ return 1
+ } else {
+ return -1
+ }
+ }
+
+// If all prerelease versions are equal but one has additional prerelease versions, that one is greater
+ if i == len(v.Pre) && i == len(o.Pre) {
+ return 0
+ } else if i == len(v.Pre) && i < len(o.Pre) {
+ return -1
+ } else {
+ return 1
+ }
+
+}
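+
+// For example, a prerelease sorts below its release:
+//
+//    MustParse("1.0.0-alpha").Compare(MustParse("1.0.0")) // -1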
+
+// IncrementPatch increments the patch version
+func (v *Version) IncrementPatch() error {
+ v.Patch++
+ return nil
+}
+
+// IncrementMinor increments the minor version
+func (v *Version) IncrementMinor() error {
+ v.Minor++
+ v.Patch = 0
+ return nil
+}
+
+// IncrementMajor increments the major version
+func (v *Version) IncrementMajor() error {
+ v.Major++
+ v.Minor = 0
+ v.Patch = 0
+ return nil
+}
+
+// Validate validates v and returns error in case
+func (v Version) Validate() error {
+ // Major, Minor, Patch already validated using uint64
+
+ for _, pre := range v.Pre {
+ if !pre.IsNum { // Numeric prerelease versions are already validated as uint64
+ if len(pre.VersionStr) == 0 {
+ return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+ }
+ if !containsOnly(pre.VersionStr, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+ }
+ }
+ }
+
+ for _, build := range v.Build {
+ if len(build) == 0 {
+ return fmt.Errorf("Build meta data can not be empty %q", build)
+ }
+ if !containsOnly(build, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+ }
+ }
+
+ return nil
+}
+
+// New is like Parse but returns a pointer: it parses the version string and returns a pointer to a validated Version, or an error
+func New(s string) (*Version, error) {
+ v, err := Parse(s)
+ vp := &v
+ return vp, err
+}
+
+// Make is an alias for Parse: it parses the version string and returns a validated Version or an error
+func Make(s string) (Version, error) {
+ return Parse(s)
+}
+
+// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
+// specs to be parsed by this library. It does so by normalizing versions before passing them to
+// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions
+// with only major and minor components specified, and removes leading 0s.
+func ParseTolerant(s string) (Version, error) {
+ s = strings.TrimSpace(s)
+ s = strings.TrimPrefix(s, "v")
+
+ // Split into major.minor.(patch+pr+meta)
+ parts := strings.SplitN(s, ".", 3)
+ // Remove leading zeros.
+ for i, p := range parts {
+ if len(p) > 1 {
+ p = strings.TrimLeft(p, "0")
+ if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") {
+ p = "0" + p
+ }
+ parts[i] = p
+ }
+ }
+ // Fill up shortened versions.
+ if len(parts) < 3 {
+ if strings.ContainsAny(parts[len(parts)-1], "+-") {
+ return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
+ }
+ for len(parts) < 3 {
+ parts = append(parts, "0")
+ }
+ }
+ s = strings.Join(parts, ".")
+
+ return Parse(s)
+}
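+
+// For example:
+//
+//    ParseTolerant("v1.2") // 1.2.0, nil
+//    Parse("v1.2")         // error: not strict semver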
+
+// Parse parses a version string and returns a validated Version or an error
+func Parse(s string) (Version, error) {
+ if len(s) == 0 {
+ return Version{}, errors.New("Version string empty")
+ }
+
+ // Split into major.minor.(patch+pr+meta)
+ parts := strings.SplitN(s, ".", 3)
+ if len(parts) != 3 {
+ return Version{}, errors.New("No Major.Minor.Patch elements found")
+ }
+
+ // Major
+ if !containsOnly(parts[0], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+ }
+ if hasLeadingZeroes(parts[0]) {
+ return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+ }
+ major, err := strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ // Minor
+ if !containsOnly(parts[1], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+ }
+ if hasLeadingZeroes(parts[1]) {
+ return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+ }
+ minor, err := strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v := Version{}
+ v.Major = major
+ v.Minor = minor
+
+ var build, prerelease []string
+ patchStr := parts[2]
+
+ if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+ build = strings.Split(patchStr[buildIndex+1:], ".")
+ patchStr = patchStr[:buildIndex]
+ }
+
+ if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+ prerelease = strings.Split(patchStr[preIndex+1:], ".")
+ patchStr = patchStr[:preIndex]
+ }
+
+ if !containsOnly(patchStr, numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+ }
+ if hasLeadingZeroes(patchStr) {
+ return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+ }
+ patch, err := strconv.ParseUint(patchStr, 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v.Patch = patch
+
+ // Prerelease
+ for _, prstr := range prerelease {
+ parsedPR, err := NewPRVersion(prstr)
+ if err != nil {
+ return Version{}, err
+ }
+ v.Pre = append(v.Pre, parsedPR)
+ }
+
+ // Build meta data
+ for _, str := range build {
+ if len(str) == 0 {
+ return Version{}, errors.New("Build meta data is empty")
+ }
+ if !containsOnly(str, alphanum) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+ }
+ v.Build = append(v.Build, str)
+ }
+
+ return v, nil
+}
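+
+// For example:
+//
+//    v, _ := Parse("1.2.3-alpha.1+build.5")
+//    // v.Major == 1, v.Minor == 2, v.Patch == 3,
+//    // v.Pre == [alpha 1], v.Build == [build 5]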
+
+// MustParse is like Parse but panics if the version cannot be parsed.
+func MustParse(s string) Version {
+ v, err := Parse(s)
+ if err != nil {
+ panic(`semver: Parse(` + s + `): ` + err.Error())
+ }
+ return v
+}
+
+// PRVersion represents a PreRelease Version
+type PRVersion struct {
+ VersionStr string
+ VersionNum uint64
+ IsNum bool
+}
+
+// NewPRVersion creates a new valid prerelease version
+func NewPRVersion(s string) (PRVersion, error) {
+ if len(s) == 0 {
+ return PRVersion{}, errors.New("Prerelease is empty")
+ }
+ v := PRVersion{}
+ if containsOnly(s, numbers) {
+ if hasLeadingZeroes(s) {
+ return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
+ }
+ num, err := strconv.ParseUint(s, 10, 64)
+
+ // Might never be hit, but just in case
+ if err != nil {
+ return PRVersion{}, err
+ }
+ v.VersionNum = num
+ v.IsNum = true
+ } else if containsOnly(s, alphanum) {
+ v.VersionStr = s
+ v.IsNum = false
+ } else {
+ return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
+ }
+ return v, nil
+}
+
+// IsNumeric checks if prerelease-version is numeric
+func (v PRVersion) IsNumeric() bool {
+ return v.IsNum
+}
+
+// Compare compares two PreRelease Versions v and o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v PRVersion) Compare(o PRVersion) int {
+ if v.IsNum && !o.IsNum {
+ return -1
+ } else if !v.IsNum && o.IsNum {
+ return 1
+ } else if v.IsNum && o.IsNum {
+ if v.VersionNum == o.VersionNum {
+ return 0
+ } else if v.VersionNum > o.VersionNum {
+ return 1
+ } else {
+ return -1
+ }
+ } else { // both are Alphas
+ if v.VersionStr == o.VersionStr {
+ return 0
+ } else if v.VersionStr > o.VersionStr {
+ return 1
+ } else {
+ return -1
+ }
+ }
+}
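+
+// For example, numeric identifiers always have lower precedence than
+// alphanumeric ones:
+//
+//    num, _ := NewPRVersion("1")
+//    alpha, _ := NewPRVersion("alpha")
+//    num.Compare(alpha) // -1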
+
+// String returns the string representation of the PRVersion
+func (v PRVersion) String() string {
+ if v.IsNum {
+ return strconv.FormatUint(v.VersionNum, 10)
+ }
+ return v.VersionStr
+}
+
+func containsOnly(s string, set string) bool {
+ return strings.IndexFunc(s, func(r rune) bool {
+ return !strings.ContainsRune(set, r)
+ }) == -1
+}
+
+func hasLeadingZeroes(s string) bool {
+ return len(s) > 1 && s[0] == '0'
+}
+
+// NewBuildVersion creates a new valid build version
+func NewBuildVersion(s string) (string, error) {
+ if len(s) == 0 {
+ return "", errors.New("Buildversion is empty")
+ }
+ if !containsOnly(s, alphanum) {
+ return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
+ }
+ return s, nil
+}
+
+// FinalizeVersion returns the major, minor and patch number only and discards
+// prerelease and build number.
+func FinalizeVersion(s string) (string, error) {
+ v, err := Parse(s)
+ if err != nil {
+ return "", err
+ }
+ v.Pre = nil
+ v.Build = nil
+
+ finalVer := v.String()
+ return finalVer, nil
+}
diff --git a/hack/tools/vendor/github.com/blang/semver/v4/sort.go b/hack/tools/vendor/github.com/blang/semver/v4/sort.go
new file mode 100644
index 0000000000..e18f880826
--- /dev/null
+++ b/hack/tools/vendor/github.com/blang/semver/v4/sort.go
@@ -0,0 +1,28 @@
+package semver
+
+import (
+ "sort"
+)
+
+// Versions represents multiple versions.
+type Versions []Version
+
+// Len returns length of version collection
+func (s Versions) Len() int {
+ return len(s)
+}
+
+// Swap swaps two versions inside the collection by their indices
+func (s Versions) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Less checks if version at index i is less than version at index j
+func (s Versions) Less(i, j int) bool {
+ return s[i].LT(s[j])
+}
+
+// Sort sorts a slice of versions
+func Sort(versions []Version) {
+ sort.Sort(Versions(versions))
+}
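+
+// Illustrative usage:
+//
+//    vs := []Version{MustParse("1.1.0"), MustParse("1.0.0")}
+//    Sort(vs) // vs is now [1.0.0 1.1.0]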
diff --git a/hack/tools/vendor/github.com/blang/semver/v4/sql.go b/hack/tools/vendor/github.com/blang/semver/v4/sql.go
new file mode 100644
index 0000000000..db958134f3
--- /dev/null
+++ b/hack/tools/vendor/github.com/blang/semver/v4/sql.go
@@ -0,0 +1,30 @@
+package semver
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements the database/sql.Scanner interface.
+func (v *Version) Scan(src interface{}) (err error) {
+ var str string
+ switch src := src.(type) {
+ case string:
+ str = src
+ case []byte:
+ str = string(src)
+ default:
+ return fmt.Errorf("version.Scan: cannot convert %T to string", src)
+ }
+
+ // Assign through the named return so a parse failure is reported to the
+ // caller instead of being silently dropped by a shadowed err.
+ t, err := Parse(str)
+ if err == nil {
+ *v = t
+ }
+
+ return
+}
+
+// Value implements the database/sql/driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
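+
+// Illustrative usage of Scan, assuming a *sql.DB named db (not part of
+// this package):
+//
+//    var v Version
+//    err := db.QueryRow("SELECT version FROM releases LIMIT 1").Scan(&v)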
diff --git a/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml b/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml
index bde0ae54e4..bc79b83961 100644
--- a/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml
+++ b/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml
@@ -39,36 +39,28 @@ linters:
enable-all: true
disable:
- cyclop
- - deadcode
- depguard
- dupl
- dupword
- - exhaustivestruct
- exhaustruct
+ - exportloopref
- forbidigo
- funlen
- gci
- gocognit
- gocyclo
- godox
- - golint
- - gomnd
- - ifshort
- - interfacer
+ - mnd
- lll
- maintidx
- - maligned
- nakedret
- nestif
- nlreturn
- - nosnakecase
- paralleltest
- prealloc
- rowserrcheck
- - scopelint
- - structcheck
- testpackage
- - varcheck
+ - tparallel
- varnamelen
- wastedassign
@@ -76,5 +68,4 @@ issues:
exclude-use-default: true
max-issues-per-linter: 0
max-same-issues: 0
-
# vim: set sw=2 ts=2 et:
diff --git a/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go
index 46d5019a78..e51df89c6c 100644
--- a/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go
+++ b/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go
@@ -3,6 +3,7 @@ package wsl
import (
"flag"
"go/ast"
+ "go/token"
"strings"
"golang.org/x/tools/go/analysis"
@@ -78,12 +79,17 @@ func (wa *wslAnalyzer) flags() flag.FlagSet {
func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) {
for _, file := range pass.Files {
- if !wa.config.IncludeGenerated && ast.IsGenerated(file) {
+ filename := getFilename(pass.Fset, file)
+ if !strings.HasSuffix(filename, ".go") {
continue
}
- filename := pass.Fset.PositionFor(file.Pos(), false).Filename
- if !strings.HasSuffix(filename, ".go") {
+ // If the file is related to cgo, the filename of the unadjusted position is not a '.go' file.
+ fn := pass.Fset.PositionFor(file.Pos(), false).Filename
+
+ // The file is skipped if the "unadjusted" file is a Go file and it is a generated file (e.g. a "_test.go" file).
+ // Other non-Go files are skipped by the first 'if', which uses the adjusted position.
+ if !wa.config.IncludeGenerated && ast.IsGenerated(file) && strings.HasSuffix(fn, ".go") {
continue
}
@@ -127,7 +133,7 @@ type multiStringValue struct {
// Set implements the flag.Value interface and will overwrite the pointer to the
// slice with a new pointer after splitting the flag by comma.
func (m *multiStringValue) Set(value string) error {
- s := []string{}
+ var s []string
for _, v := range strings.Split(value, ",") {
s = append(s, strings.TrimSpace(v))
@@ -146,3 +152,12 @@ func (m *multiStringValue) String() string {
return strings.Join(*m.slicePtr, ", ")
}
+
+func getFilename(fset *token.FileSet, file *ast.File) string {
+ filename := fset.PositionFor(file.Pos(), true).Filename
+ if !strings.HasSuffix(filename, ".go") {
+ return fset.PositionFor(file.Pos(), false).Filename
+ }
+
+ return filename
+}
diff --git a/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go b/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go
index 76f4abf617..44c7abe219 100644
--- a/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go
+++ b/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go
@@ -353,7 +353,7 @@ func (p *processor) parseBlockStatements(statements []ast.Stmt) {
return false
}
- for j := 0; j < n; j++ {
+ for j := range n {
s1 := statements[i+j]
s2 := statements[i+j+1]
@@ -1113,8 +1113,8 @@ func (p *processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne
return
}
- blockStartLine = p.fileSet.PositionFor(blockStartPos, false).Line
- blockEndLine = p.fileSet.PositionFor(blockEndPos, false).Line
+ blockStartLine = p.fileSet.Position(blockStartPos).Line
+ blockEndLine = p.fileSet.Position(blockEndPos).Line
// No whitespace possible if LBrace and RBrace is on the same line.
if blockStartLine == blockEndLine {
@@ -1362,14 +1362,14 @@ func isExampleFunc(ident *ast.Ident) bool {
}
func (p *processor) nodeStart(node ast.Node) int {
- return p.fileSet.PositionFor(node.Pos(), false).Line
+ return p.fileSet.Position(node.Pos()).Line
}
func (p *processor) nodeEnd(node ast.Node) int {
- line := p.fileSet.PositionFor(node.End(), false).Line
+ line := p.fileSet.Position(node.End()).Line
if isEmptyLabeledStmt(node) {
- return p.fileSet.PositionFor(node.Pos(), false).Line
+ return p.fileSet.Position(node.Pos()).Line
}
return line
@@ -1408,7 +1408,7 @@ func (p *processor) addErrorRange(reportAt, start, end token.Pos, reason string)
}
func (p *processor) addWarning(w string, pos token.Pos, t interface{}) {
- position := p.fileSet.PositionFor(pos, false)
+ position := p.fileSet.Position(pos)
p.warnings = append(p.warnings,
fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t),
diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go
index f68170fb31..ebf2a0dbea 100644
--- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go
+++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go
@@ -8,12 +8,12 @@ import (
"strings"
"sync"
- "github.com/butuzov/ireturn/analyzer/internal/config"
- "github.com/butuzov/ireturn/analyzer/internal/types"
-
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
+
+ "github.com/butuzov/ireturn/analyzer/internal/config"
+ "github.com/butuzov/ireturn/analyzer/internal/types"
)
const name string = "ireturn" // linter name
@@ -23,11 +23,11 @@ type validator interface {
}
type analyzer struct {
- once sync.Once
- mu sync.RWMutex
- handler validator
- err error
- diabledNolint bool
+ once sync.Once
+ mu sync.RWMutex
+ handler validator
+ err error
+ disabledNolint bool
found []analysis.Diagnostic
}
@@ -63,7 +63,7 @@ func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
}
// 003. Is it allowed to be checked?
- if !a.diabledNolint && hasDisallowDirective(f.Doc) {
+ if !a.disabledNolint && hasDisallowDirective(f.Doc) {
return
}
@@ -115,7 +115,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) {
// First: checking nonolint directive
val := fs.Lookup("nonolint")
if val != nil {
- a.diabledNolint = fs.Lookup("nonolint").Value.String() == "true"
+ a.disabledNolint = fs.Lookup("nonolint").Value.String() == "true"
}
// Second: validators implementation next
@@ -128,7 +128,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) {
}
func NewAnalyzer() *analysis.Analyzer {
- a := analyzer{} //nolint: exhaustivestruct
+ a := analyzer{}
return &analysis.Analyzer{
Name: name,
@@ -196,7 +196,7 @@ func filterInterfaces(p *analysis.Pass, ft *ast.FuncType, di map[string]struct{}
typeParams := val.String()
prefix, suffix := "interface{", "}"
- if strings.HasPrefix(typeParams, prefix) { // nolint: gosimple
+ if strings.HasPrefix(typeParams, prefix) { //nolint:gosimple
typeParams = typeParams[len(prefix):]
}
if strings.HasSuffix(typeParams, suffix) {
diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go
index 6a294ca35f..da101c7862 100644
--- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go
+++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go
@@ -2,7 +2,7 @@ package config
import "github.com/butuzov/ireturn/analyzer/internal/types"
-// allowConfig specifies a list of interfaces (keywords, patters and regular expressions)
+// allowConfig specifies a list of interfaces (keywords, patterns and regular expressions)
// that are allowed by ireturn as valid to return, any non listed interface are rejected.
type allowConfig struct {
*defaultConfig
diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go
index 6aa04e52e8..d6914af862 100644
--- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go
+++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go
@@ -10,7 +10,6 @@ import (
var ErrCollisionOfInterests = errors.New("can't have both `-accept` and `-reject` specified at same time")
-// nolint: exhaustivestruct
func DefaultValidatorConfig() *allowConfig {
return allowAll([]string{
types.NameEmpty, // "empty": empty interfaces (interface{})
diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go
index bef6913bb8..b2cde910ce 100644
--- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go
+++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go
@@ -2,7 +2,7 @@ package config
import "github.com/butuzov/ireturn/analyzer/internal/types"
-// rejectConfig specifies a list of interfaces (keywords, patters and regular expressions)
+// rejectConfig specifies a list of interfaces (keywords, patterns and regular expressions)
// that are rejected by ireturn as valid to return, any non listed interface are allowed.
type rejectConfig struct {
*defaultConfig
diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go
index 5e576374d5..0f4286515f 100644
--- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go
+++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go
@@ -47,7 +47,7 @@ func (i IFace) HashString() string {
}
func (i IFace) ExportDiagnostic() analysis.Diagnostic {
- return analysis.Diagnostic{ //nolint: exhaustivestruct
+ return analysis.Diagnostic{
Pos: i.Pos,
Message: i.String(),
}
diff --git a/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md b/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md
index 3dcc01e960..da30c8e00f 100644
--- a/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md
+++ b/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md
@@ -1,201 +1,55 @@
-
+
+| Function | Mirror |
+|----------|--------|
+| `func (*bufio.Writer) Write([]byte) (int, error)` | `func (*bufio.Writer) WriteString(string) (int, error)` |
+| `func (*bufio.Writer) WriteRune(rune) (int, error)` | `func (*bufio.Writer) WriteString(string) (int, error)` |
+| `func (*bytes.Buffer) Write([]byte) (int, error)` | `func (*bytes.Buffer) WriteString(string) (int, error)` |
+| `func (*bytes.Buffer) WriteRune(rune) (int, error)` | `func (*bytes.Buffer) WriteString(string) (int, error)` |
+| `func bytes.Compare([]byte, []byte) int` | `func strings.Compare(string, string) int` |
+| `func bytes.Contains([]byte, []byte) bool` | `func strings.Contains(string, string) bool` |
+| `func bytes.ContainsAny([]byte, string) bool` | `func strings.ContainsAny(string, string) bool` |
+| `func bytes.ContainsRune([]byte, rune) bool` | `func strings.ContainsRune(string, rune) bool` |
+| `func bytes.Count([]byte, []byte) int` | `func strings.Count(string, string) int` |
+| `func bytes.EqualFold([]byte, []byte) bool` | `func strings.EqualFold(string, string) bool` |
+| `func bytes.HasPrefix([]byte, []byte) bool` | `func strings.HasPrefix(string, string) bool` |
+| `func bytes.HasSuffix([]byte, []byte) bool` | `func strings.HasSuffix(string, string) bool` |
+| `func bytes.Index([]byte, []byte) int` | `func strings.Index(string, string) int` |
+| `func bytes.IndexAny([]byte, string) int` | `func strings.IndexAny(string, string) int` |
+| `func bytes.IndexByte([]byte, byte) int` | `func strings.IndexByte(string, byte) int` |
+| `func bytes.IndexFunc([]byte, func(rune) bool) int` | `func strings.IndexFunc(string, func(rune) bool) int` |
+| `func bytes.IndexRune([]byte, rune) int` | `func strings.IndexRune(string, rune) int` |
+| `func bytes.LastIndex([]byte, []byte) int` | `func strings.LastIndex(string, string) int` |
+| `func bytes.LastIndexAny([]byte, string) int` | `func strings.LastIndexAny(string, string) int` |
+| `func bytes.LastIndexByte([]byte, byte) int` | `func strings.LastIndexByte(string, byte) int` |
+| `func bytes.LastIndexFunc([]byte, func(rune) bool) int` | `func strings.LastIndexFunc(string, func(rune) bool) int` |
+| `func bytes.NewBuffer([]byte) *bytes.Buffer` | `func bytes.NewBufferString(string) *bytes.Buffer` |
+| `func (*httptest.ResponseRecorder) Write([]byte) (int, error)` | `func (*httptest.ResponseRecorder) WriteString(string) (int, error)` |
+| `func maphash.Bytes(maphash.Seed, []byte) uint64` | `func maphash.String(maphash.Seed, string) uint64` |
+| `func (*maphash.Hash) Write([]byte) (int, error)` | `func (*maphash.Hash) WriteString(string) (int, error)` |
+| `func (*os.File) Write([]byte) (int, error)` | `func (*os.File) WriteString(string) (int, error)` |
+| `func regexp.Match(string, []byte) (bool, error)` | `func regexp.MatchString(string, string) (bool, error)` |
+| `func (*regexp.Regexp) FindAllIndex([]byte, int) [][]int` | `func (*regexp.Regexp) FindAllStringIndex(string, int) [][]int` |
+| `func (*regexp.Regexp) FindAllSubmatchIndex([]byte, int) [][]int` | `func (*regexp.Regexp) FindAllStringSubmatchIndex(string, int) [][]int` |
+| `func (*regexp.Regexp) FindIndex([]byte) []int` | `func (*regexp.Regexp) FindStringIndex(string) []int` |
+| `func (*regexp.Regexp) FindSubmatchIndex([]byte) []int` | `func (*regexp.Regexp) FindStringSubmatchIndex(string) []int` |
+| `func (*regexp.Regexp) Match([]byte) bool` | `func (*regexp.Regexp) MatchString(string) bool` |
+| `func (*strings.Builder) Write([]byte) (int, error)` | `func (*strings.Builder) WriteString(string) (int, error)` |
+| `func (*strings.Builder) WriteRune(rune) (int, error)` | `func (*strings.Builder) WriteString(string) (int, error)` |
+| `func strings.Compare(string, string) int` | `func bytes.Compare([]byte, []byte) int` |
+| `func strings.Contains(string, string) bool` | `func bytes.Contains([]byte, []byte) bool` |
+| `func strings.ContainsAny(string, string) bool` | `func bytes.ContainsAny([]byte, string) bool` |
+| `func strings.ContainsRune(string, rune) bool` | `func bytes.ContainsRune([]byte, rune) bool` |
+| `func strings.EqualFold(string, string) bool` | `func bytes.EqualFold([]byte, []byte) bool` |
+| `func strings.HasPrefix(string, string) bool` | `func bytes.HasPrefix([]byte, []byte) bool` |
+| `func strings.HasSuffix(string, string) bool` | `func bytes.HasSuffix([]byte, []byte) bool` |
+| `func strings.Index(string, string) int` | `func bytes.Index([]byte, []byte) int` |
+| `func strings.IndexFunc(string, func(r rune) bool) int` | `func bytes.IndexFunc([]byte, func(r rune) bool) int` |
+| `func strings.LastIndex(string, string) int` | `func bytes.LastIndex([]byte, []byte) int` |
+| `func strings.LastIndexAny(string, string) int` | `func bytes.LastIndexAny([]byte, string) int` |
+| `func strings.LastIndexFunc(string, func(r rune) bool) int` | `func bytes.LastIndexFunc([]byte, func(r rune) bool) int` |
+| `func utf8.DecodeLastRune([]byte) (rune, int)` | `func utf8.DecodeLastRuneInString(string) (rune, int)` |
+| `func utf8.DecodeRune([]byte) (rune, int)` | `func utf8.DecodeRuneInString(string) (rune, int)` |
+| `func utf8.FullRune([]byte) bool` | `func utf8.FullRuneInString(string) bool` |
+| `func utf8.RuneCount([]byte) int` | `func utf8.RuneCountInString(string) int` |
+| `func utf8.Valid([]byte) bool` | `func utf8.ValidString(string) bool` |
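The table above maps each byte-oriented call to its string-oriented mirror. A minimal sketch of what the linter flags and what it suggests instead (variable names are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	s, sub := "hello world", "world"

	// Flagged by mirror: converts both strings to []byte just to call bytes.Contains.
	_ = bytes.Contains([]byte(s), []byte(sub))

	// Suggested mirror: operates on the strings directly, avoiding two allocations.
	fmt.Println(strings.Contains(s, sub))
}
```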
diff --git a/hack/tools/vendor/github.com/butuzov/mirror/Makefile b/hack/tools/vendor/github.com/butuzov/mirror/Makefile
index ac267208fb..dab6f160ae 100644
--- a/hack/tools/vendor/github.com/butuzov/mirror/Makefile
+++ b/hack/tools/vendor/github.com/butuzov/mirror/Makefile
@@ -10,7 +10,8 @@ endef
# Generate Artifacts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
generate: ## Generate Assets
- $(MAKE)
+ $(MAKE) generate-tests
+ $(MAKE) generate-mirror-table
generate-tests: ## Generates Assets at testdata
go run ./cmd/internal/tests/ "$(PWD)/testdata"
@@ -52,7 +53,7 @@ tests-summary: bin/tparse
lints: ## Run golangci-lint
lints: bin/golangci-lint
lints:
- golangci-lint run --no-config ./... --skip-dirs "^(cmd|testdata)"
+ golangci-lint run --no-config ./... --exclude-dirs "^(cmd|testdata)"
cover: ## Run Coverage
@@ -71,8 +72,8 @@ bin/tparse: INSTALL_URL=github.com/mfridman/tparse@v0.13.2
bin/tparse:
$(call install_go_bin, tparse, $(INSTALL_URL))
-bin/golangci-lint: ## Installs golangci-lint@v1.55.2 (if not exists)
-bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.55.2
+bin/golangci-lint: ## Installs golangci-lint@v1.62.0 (if not exists)
+bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.62.0
bin/golangci-lint:
$(call install_go_bin, golangci-lint, $(INSTALL_URL))
@@ -99,7 +100,7 @@ help: dep-gawk
@ echo ""
-# Helper Mehtods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Helper Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
dep-gawk:
@ if [ -z "$(shell command -v gawk)" ]; then \
if [ -x /usr/local/bin/brew ]; then $(MAKE) _brew_gawk_install; exit 0; fi; \
@@ -111,21 +112,21 @@ dep-gawk:
fi
_brew_gawk_install:
- @ echo "Instaling gawk using brew... "
+ @ echo "Installing gawk using brew... "
@ brew install gawk --quiet
@ echo "done"
_ubuntu_gawk_install:
- @ echo "Instaling gawk using apt-get... "
+ @ echo "Installing gawk using apt-get... "
@ apt-get -q install gawk -y
@ echo "done"
_alpine_gawk_install:
- @ echo "Instaling gawk using yum... "
+ @ echo "Installing gawk using yum... "
@ apk add --update --no-cache gawk
@ echo "done"
_centos_gawk_install:
- @ echo "Instaling gawk using yum... "
+ @ echo "Installing gawk using yum... "
@ yum install -q -y gawk;
@ echo "done"
diff --git a/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go b/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go
index 13ded46c6d..b15019ce1f 100644
--- a/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go
+++ b/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go
@@ -44,9 +44,9 @@ func Run(pass *analysis.Pass, withTests bool) []*checker.Violation {
BytesFunctions, BytesBufferMethods,
RegexpFunctions, RegexpRegexpMethods,
StringFunctions, StringsBuilderMethods,
+ MaphashMethods, MaphashFunctions,
BufioMethods, HTTPTestMethods,
- OsFileMethods, MaphashMethods,
- UTF8Functions,
+ OsFileMethods, UTF8Functions,
)
check.Type = checker.WrapType(pass.TypesInfo)
diff --git a/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go b/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go
index 0aa43ff7bb..345a64123e 100644
--- a/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go
+++ b/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go
@@ -2,35 +2,66 @@ package mirror
import "github.com/butuzov/mirror/internal/checker"
-var MaphashMethods = []checker.Violation{
- { // (*hash/maphash).Write
- Targets: checker.Bytes,
- Type: checker.Method,
- Package: "hash/maphash",
- Struct: "Hash",
- Caller: "Write",
- Args: []int{0},
- AltCaller: "WriteString",
+var (
+ MaphashFunctions = []checker.Violation{
+ { // maphash.Bytes
+ Targets: checker.Bytes,
+ Type: checker.Function,
+ Package: "hash/maphash",
+ Caller: "Bytes",
+ Args: []int{1},
+ AltCaller: "String",
- Generate: &checker.Generate{
- PreCondition: `h := maphash.Hash{}`,
- Pattern: `Write($0)`,
- Returns: []string{"int", "error"},
+ Generate: &checker.Generate{
+ Pattern: `Bytes(maphash.MakeSeed(), $0)`,
+ Returns: []string{"uint64"},
+ },
},
- },
- { // (*hash/maphash).WriteString
- Targets: checker.Strings,
- Type: checker.Method,
- Package: "hash/maphash",
- Struct: "Hash",
- Caller: "WriteString",
- Args: []int{0},
- AltCaller: "Write",
+ { // maphash.String
+ Targets: checker.Strings,
+ Type: checker.Function,
+ Package: "hash/maphash",
+ Caller: "String",
+ Args: []int{1},
+ AltCaller: "Bytes",
- Generate: &checker.Generate{
- PreCondition: `h := maphash.Hash{}`,
- Pattern: `WriteString($0)`,
- Returns: []string{"int", "error"},
+ Generate: &checker.Generate{
+ Pattern: `String(maphash.MakeSeed(), $0)`,
+ Returns: []string{"uint64"},
+ },
},
- },
-}
+ }
+
+ MaphashMethods = []checker.Violation{
+ { // (*hash/maphash).Write
+ Targets: checker.Bytes,
+ Type: checker.Method,
+ Package: "hash/maphash",
+ Struct: "Hash",
+ Caller: "Write",
+ Args: []int{0},
+ AltCaller: "WriteString",
+
+ Generate: &checker.Generate{
+ PreCondition: `h := maphash.Hash{}`,
+ Pattern: `Write($0)`,
+ Returns: []string{"int", "error"},
+ },
+ },
+ { // (*hash/maphash).WriteString
+ Targets: checker.Strings,
+ Type: checker.Method,
+ Package: "hash/maphash",
+ Struct: "Hash",
+ Caller: "WriteString",
+ Args: []int{0},
+ AltCaller: "Write",
+
+ Generate: &checker.Generate{
+ PreCondition: `h := maphash.Hash{}`,
+ Pattern: `WriteString($0)`,
+ Returns: []string{"int", "error"},
+ },
+ },
+ }
+)
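The split above separates the new package-level `maphash.Bytes`/`maphash.String` checks from the existing `Hash` method checks. A small sketch of the conversion the new entries catch (the `seed` and `s` variables are illustrative):

```go
package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	seed := maphash.MakeSeed()
	s := "some key"

	// Flagged: the []byte(s) conversion allocates before hashing.
	sum1 := maphash.Bytes(seed, []byte(s))

	// Mirror: hashes the string directly.
	sum2 := maphash.String(seed, s)

	fmt.Println(sum1 == sum2) // true: same bytes hashed with the same seed.
}
```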
diff --git a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go
index c1a9416314..fb9ba41729 100644
--- a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go
+++ b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go
@@ -9,12 +9,12 @@ import (
"strings"
)
-// Checker will perform standart check on package and its methods.
+// Checker will perform standard check on package and its methods.
type Checker struct {
Violations []Violation // List of available violations
Packages map[string][]int // Storing indexes of Violations per pkg/kg.Struct
Type func(ast.Expr) string // Type Checker closure.
- Print func(ast.Node) []byte // String representation of the expresion.
+ Print func(ast.Node) []byte // String representation of the expression.
}
func New(violations ...[]Violation) Checker {
@@ -76,7 +76,7 @@ func (c *Checker) Handle(v *Violation, ce *ast.CallExpr) (map[int]ast.Expr, bool
continue
}
- // is it convertsion call
+ // is it conversion call
if !c.callConverts(call) {
continue
}
diff --git a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go
index 3d8acf1415..c2c1492086 100644
--- a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go
+++ b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go
@@ -28,7 +28,7 @@ const (
UntypedRune string = "untyped rune"
)
-// Violation describs what message we going to give to a particular code violation
+// Violation describes what message we are going to give for a particular code violation
type Violation struct {
Type ViolationType //
Args []int // Indexes of the arguments needs to be checked
@@ -143,7 +143,7 @@ func (v *Violation) Diagnostic(fSet *token.FileSet) analysis.Diagnostic {
v.AltPackage = v.Package
}
- // Hooray! we dont need to change package and redo imports.
+ // Hooray! we don't need to change package and redo imports.
if v.Type == Function && v.AltPackage == v.Package && noNl {
diagnostic.SuggestedFixes = []analysis.SuggestedFix{{
Message: "Fix Issue With",
@@ -166,7 +166,7 @@ type GolangIssue struct {
Original string
}
-// Issue intended to be used only within `golangci-lint`, bu you can use use it
+// Issue intended to be used only within `golangci-lint`, but you can use it
// alongside Diagnostic if you wish.
func (v *Violation) Issue(fSet *token.FileSet) GolangIssue {
issue := GolangIssue{
diff --git a/hack/tools/vendor/github.com/butuzov/mirror/readme.md b/hack/tools/vendor/github.com/butuzov/mirror/readme.md
index f830ea72ea..f5cfa47a68 100644
--- a/hack/tools/vendor/github.com/butuzov/mirror/readme.md
+++ b/hack/tools/vendor/github.com/butuzov/mirror/readme.md
@@ -2,6 +2,13 @@
`mirror` suggests the use of alternative functions/methods in order to gain performance boosts by avoiding unnecessary `[]byte/string` conversion calls. See [MIRROR_FUNCS.md](MIRROR_FUNCS.md) for the list of mirror functions you can use in go's stdlib.
+---
+
+[Support Ukraine — u24.gov.ua](https://u24.gov.ua/)
+[@butuzov on GitHub](https://github.com/butuzov)
+
+---
+
## Linter Use Cases
### `github.com/argoproj/argo-cd`
@@ -86,13 +93,13 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi
- flag `--tests` (e.g. `--tests=false`)
- flag `--skip-files` (e.g. `--skip-files="_test.go"`)
- - yaml confguration `run.skip-files`:
+ - yaml configuration `run.skip-files`:
```yaml
run:
skip-files:
- '(.+)_test\.go'
```
- - yaml confguration `issues.exclude-rules`:
+ - yaml configuration `issues.exclude-rules`:
```yaml
issues:
exclude-rules:
@@ -106,7 +113,7 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi
```shell
# Update Assets (testdata/(strings|bytes|os|utf8|maphash|regexp|bufio).go)
-(task|make) generated
+(task|make) generate
# Run Tests
(task|make) tests
# Lint Code
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/.gitignore b/hack/tools/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 0000000000..50d95c548b
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/LICENSE b/hack/tools/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 0000000000..89b8179965
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/README.md b/hack/tools/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 0000000000..9433004a28
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,30 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries exponentially increase and stop increasing when a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 0000000000..3676ee405d
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+ // NextBackOff returns the duration to wait before retrying the operation,
+ // or backoff.Stop to indicate that no more retries should be made.
+ //
+ // Example usage:
+ //
+ // duration := backoff.NextBackOff();
+ // if (duration == backoff.Stop) {
+ // // Do not retry operation.
+ // } else {
+ // // Sleep for duration and retry operation.
+ // }
+ //
+ NextBackOff() time.Duration
+
+ // Reset to initial state.
+ Reset()
+}
+
+// Stop indicates that no more retries should be made for use in NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+ Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset() {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+ return &ConstantBackOff{Interval: d}
+}
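For orientation, a minimal sketch of driving the `BackOff` interface by hand with the types defined above; the `operation` closure is hypothetical, and the `Retry` helper added later in this vendor drop wraps essentially this loop:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	operation := func() error { return errors.New("not ready yet") } // hypothetical op

	var b backoff.BackOff = backoff.NewConstantBackOff(100 * time.Millisecond)
	b.Reset()

	for attempt := 0; attempt < 5; attempt++ {
		if err := operation(); err == nil {
			fmt.Println("succeeded")
			return
		}
		next := b.NextBackOff()
		if next == backoff.Stop {
			break // the policy says to give up
		}
		time.Sleep(next)
	}
	fmt.Println("gave up")
}
```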
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/context.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 0000000000..48482330eb
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+ "context"
+ "time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+ BackOff
+ Context() context.Context
+}
+
+type backOffContext struct {
+ BackOff
+ ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx
+//
+// ctx must not be nil
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+ if ctx == nil {
+ panic("nil context")
+ }
+
+ if b, ok := b.(*backOffContext); ok {
+ return &backOffContext{
+ BackOff: b.BackOff,
+ ctx: ctx,
+ }
+ }
+
+ return &backOffContext{
+ BackOff: b,
+ ctx: ctx,
+ }
+}
+
+func getContext(b BackOff) context.Context {
+ if cb, ok := b.(BackOffContext); ok {
+ return cb.Context()
+ }
+ if tb, ok := b.(*backOffTries); ok {
+ return getContext(tb.delegate)
+ }
+ return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+ return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+ select {
+ case <-b.ctx.Done():
+ return Stop
+ default:
+ return b.BackOff.NextBackOff()
+ }
+}
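A short sketch of how `WithContext` is meant to be used, assuming the `NewConstantBackOff` constructor from the sibling file:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Wrap any policy; NextBackOff returns Stop once ctx is done.
	b := backoff.WithContext(backoff.NewConstantBackOff(500*time.Millisecond), ctx)

	for {
		d := b.NextBackOff()
		if d == backoff.Stop {
			fmt.Println("stopped:", ctx.Err())
			return
		}
		time.Sleep(d)
		// ... retry the operation here ...
	}
}
```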
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go
new file mode 100644
index 0000000000..aac99f196a
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -0,0 +1,216 @@
+package backoff
+
+import (
+ "math/rand"
+ "time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+ RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the following default arguments, for 10 tries the sequence will be,
+and assuming we go over the MaxElapsedTime on the 10th try:
+
+ Request # RetryInterval (seconds) Randomized Interval (seconds)
+
+ 1 0.5 [0.25, 0.75]
+ 2 0.75 [0.375, 1.125]
+ 3 1.125 [0.562, 1.687]
+ 4 1.687 [0.8435, 2.53]
+ 5 2.53 [1.265, 3.795]
+ 6 3.795 [1.897, 5.692]
+ 7 5.692 [2.846, 8.538]
+ 8 8.538 [4.269, 12.807]
+ 9 12.807 [6.403, 19.210]
+ 10 19.210 backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+ // After MaxElapsedTime the ExponentialBackOff returns Stop.
+ // It never stops if MaxElapsedTime == 0.
+ MaxElapsedTime time.Duration
+ Stop time.Duration
+ Clock Clock
+
+ currentInterval time.Duration
+ startTime time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+ Now() time.Time
+}
+
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
+// Default values for ExponentialBackOff.
+const (
+ DefaultInitialInterval = 500 * time.Millisecond
+ DefaultRandomizationFactor = 0.5
+ DefaultMultiplier = 1.5
+ DefaultMaxInterval = 60 * time.Second
+ DefaultMaxElapsedTime = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
+ b := &ExponentialBackOff{
+ InitialInterval: DefaultInitialInterval,
+ RandomizationFactor: DefaultRandomizationFactor,
+ Multiplier: DefaultMultiplier,
+ MaxInterval: DefaultMaxInterval,
+ MaxElapsedTime: DefaultMaxElapsedTime,
+ Stop: Stop,
+ Clock: SystemClock,
+ }
+ for _, fn := range opts {
+ fn(b)
+ }
+ b.Reset()
+ return b
+}
+
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.InitialInterval = duration
+ }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.RandomizationFactor = randomizationFactor
+ }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Multiplier = multiplier
+ }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxInterval = duration
+ }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxElapsedTime = duration
+ }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Stop = duration
+ }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Clock = clock
+ }
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+ return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+ b.currentInterval = b.InitialInterval
+ b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+ // Make sure we have not gone over the maximum elapsed time.
+ elapsed := b.GetElapsedTime()
+ next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+ b.incrementCurrentInterval()
+ if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+ return b.Stop
+ }
+ return next
+}
+
+// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
+// is created and is reset when Reset() is called.
+//
+// The elapsed time is computed using time.Now().UnixNano(). It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+ return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+ // Check for overflow, if overflow is detected set the current interval to the max interval.
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+ b.currentInterval = b.MaxInterval
+ } else {
+ b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+ }
+}
+
+// Returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ if randomizationFactor == 0 {
+ return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ }
+ var delta = randomizationFactor * float64(currentInterval)
+ var minInterval = float64(currentInterval) - delta
+ var maxInterval = float64(currentInterval) + delta
+
+ // Get a random value from the range [minInterval, maxInterval].
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+ // we want a 33% chance for selecting either 1, 2 or 3.
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
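A small deterministic sketch of the functional options introduced in this version of the package; jitter is disabled so the printed sequence is predictable:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(250*time.Millisecond),
		backoff.WithRandomizationFactor(0), // jitter off, purely for illustration
		backoff.WithMultiplier(2),
		backoff.WithMaxInterval(2*time.Second),
	)

	// Capped doubling: 250ms 500ms 1s 2s 2s
	for i := 0; i < 5; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```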
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/retry.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 0000000000..b9c0c51cd7
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+ "errors"
+ "time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+ return func() (struct{}, error) {
+ return struct{}{}, o()
+ }
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy states to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+ return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+ return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+ return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+ return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+ return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ var (
+ err error
+ next time.Duration
+ res T
+ )
+ if t == nil {
+ t = &defaultTimer{}
+ }
+
+ defer func() {
+ t.Stop()
+ }()
+
+ ctx := getContext(b)
+
+ b.Reset()
+ for {
+ res, err = operation()
+ if err == nil {
+ return res, nil
+ }
+
+ var permanent *PermanentError
+ if errors.As(err, &permanent) {
+ return res, permanent.Err
+ }
+
+ if next = b.NextBackOff(); next == Stop {
+ if cerr := ctx.Err(); cerr != nil {
+ return res, cerr
+ }
+
+ return res, err
+ }
+
+ if notify != nil {
+ notify(err, next)
+ }
+
+ t.Start(next)
+
+ select {
+ case <-ctx.Done():
+ return res, ctx.Err()
+ case <-t.C():
+ }
+ }
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+ Err error
+}
+
+func (e *PermanentError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+ return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+ _, ok := target.(*PermanentError)
+ return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &PermanentError{
+ Err: err,
+ }
+}
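A minimal sketch of the two retry entry points above, using a hypothetical flaky operation:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0

	// Hypothetical operation: fails twice, then succeeds.
	op := func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("transient failure")
		}
		return "payload", nil
	}

	// RetryWithData drives op with the backoff policy and returns its result.
	res, err := backoff.RetryWithData(op, backoff.NewExponentialBackOff())
	fmt.Println(res, err, attempts) // payload <nil> 3

	// Wrapping an error with backoff.Permanent stops retries immediately.
	err = backoff.Retry(func() error {
		return backoff.Permanent(errors.New("bad request"))
	}, backoff.NewExponentialBackOff())
	fmt.Println(err) // bad request
}
```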
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 0000000000..df9d68bce5
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+ C <-chan time.Time
+ c chan time.Time
+ b BackOff
+ ctx context.Context
+ timer Timer
+ stop chan struct{}
+ stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once. The channel is closed when Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+ return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+ if timer == nil {
+ timer = &defaultTimer{}
+ }
+ c := make(chan time.Time)
+ t := &Ticker{
+ C: c,
+ c: c,
+ b: b,
+ ctx: getContext(b),
+ timer: timer,
+ stop: make(chan struct{}),
+ }
+ t.b.Reset()
+ go t.run()
+ return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+ t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+ c := t.c
+ defer close(c)
+
+ // Ticker is guaranteed to tick at least once.
+ afterC := t.send(time.Now())
+
+ for {
+ if afterC == nil {
+ return
+ }
+
+ select {
+ case tick := <-afterC:
+ afterC = t.send(tick)
+ case <-t.stop:
+ t.c = nil // Prevent future ticks from being sent to the channel.
+ return
+ case <-t.ctx.Done():
+ return
+ }
+ }
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+ select {
+ case t.c <- tick:
+ case <-t.stop:
+ return nil
+ }
+
+ next := t.b.NextBackOff()
+ if next == Stop {
+ t.Stop()
+ return nil
+ }
+
+ t.timer.Start(next)
+ return t.timer.C()
+}
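A sketch of the channel-based API: the ticker fires once per backoff interval, and the consumer stops it on success (the `op` closure is hypothetical):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	op := func() error { // hypothetical operation
		attempts++
		if attempts < 3 {
			return errors.New("not yet")
		}
		return nil
	}

	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	// Guaranteed to tick at least once; break out on success.
	for range ticker.C {
		if err := op(); err == nil {
			break
		}
	}
	fmt.Println("done after", attempts, "attempts")
}
```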
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/timer.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 0000000000..8120d0213c
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+type Timer interface {
+ Start(duration time.Duration)
+ Stop()
+ C() <-chan time.Time
+}
+
+// defaultTimer implements Timer interface using time.Timer
+type defaultTimer struct {
+ timer *time.Timer
+}
+
+// C returns the timer's channel, which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+ return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration
+func (t *defaultTimer) Start(duration time.Duration) {
+ if t.timer == nil {
+ t.timer = time.NewTimer(duration)
+ } else {
+ t.timer.Reset(duration)
+ }
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+ if t.timer != nil {
+ t.timer.Stop()
+ }
+}
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/tries.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 0000000000..28d58ca37c
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called too many times since
+the last time Reset() was called.
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+ return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+ delegate BackOff
+ maxTries uint64
+ numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+ if b.maxTries == 0 {
+ return Stop
+ }
+ if b.maxTries > 0 {
+ if b.maxTries <= b.numTries {
+ return Stop
+ }
+ b.numTries++
+ }
+ return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+ b.numTries = 0
+ b.delegate.Reset()
+}
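A short sketch of capping retries with `WithMaxRetries`; note that `maxTries` counts the retries after the first attempt, so 3 retries means up to 4 attempts:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	operation := func() error { // hypothetical: always fails
		attempts++
		return errors.New("still failing")
	}

	// Allow at most 3 retries after the first attempt, with no wait between them.
	b := backoff.WithMaxRetries(backoff.NewConstantBackOff(0), 3)

	err := backoff.Retry(operation, b)
	fmt.Println(err, attempts) // still failing 4
}
```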
diff --git a/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml b/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml
index 2ad830d1b2..b240f85ce9 100644
--- a/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml
+++ b/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml
@@ -1,6 +1,9 @@
linters-settings:
gci:
- local-prefixes: github.com/ckaznocha/intrange
+ sections:
+ - standard
+ - default
+ - localmodule
gocritic:
enabled-tags:
- diagnostic
@@ -10,10 +13,7 @@ linters-settings:
- style
goimports:
local-prefixes: github.com/ckaznocha/intrange
- golint:
- min-confidence: 0
govet:
- check-shadowing: true
enable:
- asmdecl
- assign
@@ -24,6 +24,7 @@ linters-settings:
- cgocall
- composite
- copylock
+ - copyloopvar
- deepequalerrors
- errorsas
- fieldalignment
@@ -57,18 +58,16 @@ linters:
- dupl
- errcheck
- errorlint
- - exportloopref
- gci
- gochecknoinits
- goconst
- gocritic
- godot
- godox
- - goerr113
+ - err113
- gofmt
- gofumpt
- goimports
- - gomnd
- goprintffuncname
- gosec
- gosimple
@@ -94,6 +93,6 @@ linters:
- wastedassign
- whitespace
- wsl
-run:
- skip-dirs:
+issues:
+ exclude-dirs:
- testdata/
diff --git a/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go b/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go
index 44a15091e7..229c847d5a 100644
--- a/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go
+++ b/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go
@@ -79,6 +79,8 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
return
}
+ initAssign := init.Tok == token.ASSIGN
+
if len(init.Lhs) != 1 || len(init.Rhs) != 1 {
return
}
@@ -97,16 +99,13 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
return
}
- var nExpr ast.Expr
+ var (
+ operand ast.Expr
+ hasEquivalentOperator bool
+ )
switch cond.Op {
- case token.LSS: // ;i < n;
- if isBenchmark(cond.Y) {
- return
- }
-
- nExpr = findNExpr(cond.Y)
-
+ case token.LSS, token.LEQ: // ;i < n; || ;i <= n;
x, ok := cond.X.(*ast.Ident)
if !ok {
return
@@ -115,13 +114,10 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
if x.Name != initIdent.Name {
return
}
- case token.GTR: // ;n > i;
- if isBenchmark(cond.X) {
- return
- }
-
- nExpr = findNExpr(cond.X)
+ hasEquivalentOperator = cond.Op == token.LEQ
+ operand = cond.Y
+ case token.GTR, token.GEQ: // ;n > i; || ;n >= i;
y, ok := cond.Y.(*ast.Ident)
if !ok {
return
@@ -130,6 +126,9 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
if y.Name != initIdent.Name {
return
}
+
+ hasEquivalentOperator = cond.Op == token.GEQ
+ operand = cond.X
default:
return
}
@@ -228,7 +227,7 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
bc := &bodyChecker{
initIdent: initIdent,
- nExpr: nExpr,
+ nExpr: findNExpr(operand),
}
ast.Inspect(forStmt.Body, bc.check)
@@ -237,9 +236,50 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
return
}
+ if initAssign {
+ pass.Report(analysis.Diagnostic{
+ Pos: forStmt.Pos(),
+ Message: msg + "\nBecause the key is not part of the loop's scope, take care to consider side effects.",
+ })
+
+ return
+ }
+
+ operandIsNumberLit := isNumberLit(operand)
+
+ if hasEquivalentOperator && !operandIsNumberLit {
+ return
+ }
+
+ rangeX := operandToString(
+ pass,
+ initIdent,
+ operand,
+ hasEquivalentOperator && operandIsNumberLit,
+ )
+
+ var replacement string
+ if bc.accessed {
+ replacement = fmt.Sprintf("%s := range %s", initIdent.Name, rangeX)
+ } else {
+ replacement = fmt.Sprintf("range %s", rangeX)
+ }
+
pass.Report(analysis.Diagnostic{
Pos: forStmt.Pos(),
Message: msg,
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ Message: fmt.Sprintf("Replace loop with `%s`", replacement),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: forStmt.Init.Pos(),
+ End: forStmt.Post.End(),
+ NewText: []byte(replacement),
+ },
+ },
+ },
+ },
})
}
@@ -363,26 +403,45 @@ func findNExpr(expr ast.Expr) ast.Expr {
}
}
-func isBenchmark(expr ast.Expr) bool {
- selectorExpr, ok := expr.(*ast.SelectorExpr)
- if !ok {
- return false
- }
+func recursiveOperandToString(
+ expr ast.Expr,
+ incrementInt bool,
+) string {
+ switch e := expr.(type) {
+ case *ast.CallExpr:
+ args := ""
- if selectorExpr.Sel.Name != "N" {
- return false
- }
+ for i, v := range e.Args {
+ if i > 0 {
+ args += ", "
+ }
- ident, ok := selectorExpr.X.(*ast.Ident)
- if !ok {
- return false
- }
+ args += recursiveOperandToString(v, incrementInt && len(e.Args) == 1)
+ }
- if ident.Name == "b" {
- return true
- }
+ return recursiveOperandToString(e.Fun, false) + "(" + args + ")"
+ case *ast.BasicLit:
+ if incrementInt && e.Kind == token.INT {
+ v, err := strconv.Atoi(e.Value)
+ if err == nil {
+ return strconv.Itoa(v + 1)
+ }
+
+ return e.Value
+ }
- return false
+ return e.Value
+ case *ast.Ident:
+ return e.Name
+ case *ast.SelectorExpr:
+ return recursiveOperandToString(e.X, false) + "." + recursiveOperandToString(e.Sel, false)
+ case *ast.IndexExpr:
+ return recursiveOperandToString(e.X, false) + "[" + recursiveOperandToString(e.Index, false) + "]"
+ case *ast.BinaryExpr:
+ return recursiveOperandToString(e.X, false) + " " + e.Op.String() + " " + recursiveOperandToString(e.Y, false)
+ default:
+ return ""
+ }
}
func identEqual(a, b ast.Expr) bool {
@@ -428,6 +487,7 @@ type bodyChecker struct {
initIdent *ast.Ident
nExpr ast.Expr
modified bool
+ accessed bool
}
func (b *bodyChecker) check(n ast.Node) bool {
@@ -446,11 +506,55 @@ func (b *bodyChecker) check(n ast.Node) bool {
return false
}
+ case *ast.Ident:
+ if identEqual(stmt, b.initIdent) {
+ b.accessed = true
+ }
}
return true
}
+func isNumberLit(exp ast.Expr) bool {
+ switch lit := exp.(type) {
+ case *ast.BasicLit:
+ if lit.Kind == token.INT {
+ return true
+ }
+
+ return false
+ case *ast.CallExpr:
+ switch fun := lit.Fun.(type) {
+ case *ast.Ident:
+ switch fun.Name {
+ case
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64":
+ default:
+ return false
+ }
+ default:
+ return false
+ }
+
+ if len(lit.Args) != 1 {
+ return false
+ }
+
+ return isNumberLit(lit.Args[0])
+ default:
+ return false
+ }
+}
+
func compareNumberLit(exp ast.Expr, val int) bool {
switch lit := exp.(type) {
case *ast.BasicLit:
@@ -497,3 +601,27 @@ func compareNumberLit(exp ast.Expr, val int) bool {
return false
}
}
+
+func operandToString(
+ pass *analysis.Pass,
+ i *ast.Ident,
+ operand ast.Expr,
+ increment bool,
+) string {
+ s := recursiveOperandToString(operand, increment)
+ t := pass.TypesInfo.TypeOf(i)
+
+ if t == types.Typ[types.Int] {
+ if len(s) > 5 && s[:4] == "int(" && s[len(s)-1] == ')' {
+ s = s[4 : len(s)-1]
+ }
+
+ return s
+ }
+
+ if len(s) > 2 && s[len(s)-1] == ')' {
+ return s
+ }
+
+ return t.String() + "(" + s + ")"
+}
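A sketch of the loops the new `SuggestedFixes` rewrite targets (requires Go 1.22's range-over-int):

```go
package main

import "fmt"

func main() {
	// Before: the classic counting loop, flagged by intrange.
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}

	// Suggested fix: identical iteration, with i still scoped to the loop.
	for i := range 3 {
		fmt.Println(i)
	}

	// A `<=` bound is rewritten only when it is a number literal; the fix
	// increments it: `for i := 0; i <= 2; i++` also becomes `for i := range 3`.

	// When the body never reads the variable (bc.accessed == false),
	// the fix drops it entirely.
	for range 3 {
		fmt.Println("tick")
	}
}
```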
diff --git a/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml b/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml
index e3bf79ae72..fdf0bb2f22 100644
--- a/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml
+++ b/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml
@@ -5,14 +5,12 @@ linters:
- bodyclose
- decorder
- durationcheck
+ - err113
- errchkjson
- errname
- errorlint
- - execinquery
- exhaustive
- - exportloopref
- gocritic
- - goerr113
- gofmt
- goimports
- goprintffuncname
@@ -20,7 +18,6 @@ linters:
- importas
- misspell
- nolintlint
- - nosnakecase
- prealloc
- predeclared
- promlinter
diff --git a/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md b/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md
index ac9c131df2..190756f928 100644
--- a/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md
+++ b/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md
@@ -47,7 +47,8 @@ Package variable reassignment is generally confusing, though, and we recommend a
The `pattern` flag can be set to a regular expression to define what variables cannot be reassigned, and `.*` is
recommended if it works with your code.
-## Limitations
+## Development
-If a variable shadows the name of an import, an assignment of a field in the variable will trigger the linter. Shadowing
-can be confusing, so it's recommended to rename the variable.
+[mage](https://magefile.org/) is used for development. Run `go run mage.go -l` to see available targets.
+
+For example, to run checks before sending a PR, run `go run mage.go check`.
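For context, a minimal sketch of the reassignment this linter reports (assuming the default pattern, which matches names such as `EOF`; the analyzer change below additionally lets patterns match the qualified form, e.g. `io.EOF`):

```go
package main

import (
	"errors"
	"io"
)

func main() {
	// Flagged by reassign: mutating another package's variable changes
	// behavior for every other consumer of io.EOF in the process.
	io.EOF = errors.New("not actually the end")
}
```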
diff --git a/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go b/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go
index e1b47d5b95..c2a29c5299 100644
--- a/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go
+++ b/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go
@@ -48,23 +48,35 @@ func run(pass *analysis.Pass) (interface{}, error) {
func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, prefix string) {
switch x := expr.(type) {
case *ast.SelectorExpr:
- if !checkRE.MatchString(x.Sel.Name) {
- return
- }
-
selectIdent, ok := x.X.(*ast.Ident)
if !ok {
return
}
+ var pkgPath string
if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok {
- if pkg, ok := selectObj.(*types.PkgName); !ok || pkg.Imported() == pass.Pkg {
+ pkg, ok := selectObj.(*types.PkgName)
+ if !ok || pkg.Imported() == pass.Pkg {
return
}
+ pkgPath = pkg.Imported().Path()
}
- pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name)
+ matches := false
+ if checkRE.MatchString(x.Sel.Name) {
+ matches = true
+ }
+ if !matches {
+ // Expression may include a package name, so check that too. Support was added later so we check
+ // just name and qualified name separately for compatibility.
+ if checkRE.MatchString(pkgPath + "." + x.Sel.Name) {
+ matches = true
+ }
+ }
+ if matches {
+ pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name)
+ }
case *ast.Ident:
use, ok := pass.TypesInfo.Uses[x].(*types.Var)
if !ok {
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/.gitignore b/hack/tools/vendor/github.com/felixge/httpsnoop/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/hack/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt
new file mode 100644
index 0000000000..e028b46a9b
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/Makefile b/hack/tools/vendor/github.com/felixge/httpsnoop/Makefile
new file mode 100644
index 0000000000..4e12afdd90
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/Makefile
@@ -0,0 +1,10 @@
+.PHONY: ci generate clean
+
+ci: clean generate
+ go test -race -v ./...
+
+generate:
+ go generate .
+
+clean:
+ rm -rf *_generated*.go
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/README.md b/hack/tools/vendor/github.com/felixge/httpsnoop/README.md
new file mode 100644
index 0000000000..cf6b42f3d7
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/README.md
@@ -0,0 +1,95 @@
+# httpsnoop
+
+Package httpsnoop provides an easy way to capture http related metrics (i.e.
+response time, bytes written, and http status code) from your application's
+http.Handlers.
+
+Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
+which is also exposed for users interested in a more low-level API.
+
+[Go Reference](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
+
+## Usage Example
+
+```go
+// myH is your app's http handler, perhaps a http.ServeMux or similar.
+var myH http.Handler
+// wrappedH wraps myH in order to log every request.
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ m := httpsnoop.CaptureMetrics(myH, w, r)
+ log.Printf(
+ "%s %s (code=%d dt=%s written=%d)",
+ r.Method,
+ r.URL,
+ m.Code,
+ m.Duration,
+ m.Written,
+ )
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However, if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples that suggest it to be a fairly trivial
+undertaking. Unfortunately everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements, returning a wrapped version implementing the
+exact same set of interfaces.
+
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either. It's possible that it is
+still missing some interfaces provided by the go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
+`http.ResponseWriter` and type-assert the result to its other interfaces.
+
+However, hopefully the explanation above has sufficiently scared you of rolling
+your own solution to this problem. httpsnoop may still break your application,
+but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
+
+## Performance
+
+```
+BenchmarkBaseline-8 20000 94912 ns/op
+BenchmarkCaptureMetrics-8 20000 95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, therefore it should be reasonable to
+assume that the overhead introduced by `CaptureMetrics` is absolutely
+negligible.
+
+## License
+
+MIT
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/hack/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 0000000000..bec7b71b39
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,86 @@
+package httpsnoop
+
+import (
+ "io"
+ "net/http"
+ "time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+ // Code is the first http response code passed to the WriteHeader func of
+ // the ResponseWriter. If no such call is made, a default code of 200 is
+ // assumed instead.
+ Code int
+ // Duration is the time it took to execute the handler.
+ Duration time.Duration
+ // Written is the number of bytes successfully written by the Write or
+ // ReadFrom function of the ResponseWriter. ResponseWriters may also write
+ // data to their underlying connection directly (e.g. headers), but those
+ // are not tracked. Therefore the number of Written bytes will usually match
+ // the size of the response body.
+ Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+ return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+ hnd.ServeHTTP(ww, r)
+ })
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+ m := Metrics{Code: http.StatusOK}
+ m.CaptureMetrics(w, fn)
+ return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
+ var (
+ start = time.Now()
+ headerWritten bool
+ hooks = Hooks{
+ WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
+ return func(code int) {
+ next(code)
+
+ if !(code >= 100 && code <= 199) && !headerWritten {
+ m.Code = code
+ headerWritten = true
+ }
+ }
+ },
+
+ Write: func(next WriteFunc) WriteFunc {
+ return func(p []byte) (int, error) {
+ n, err := next(p)
+
+ m.Written += int64(n)
+ headerWritten = true
+ return n, err
+ }
+ },
+
+ ReadFrom: func(next ReadFromFunc) ReadFromFunc {
+ return func(src io.Reader) (int64, error) {
+ n, err := next(src)
+
+ headerWritten = true
+ m.Written += n
+ return n, err
+ }
+ },
+ }
+ )
+
+ fn(Wrap(w, hooks))
+ m.Duration += time.Since(start)
+}
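A minimal sketch of `CaptureMetricsFn`, the handler-free variant defined above, exercised against an `httptest.ResponseRecorder`:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	w := httptest.NewRecorder()

	// CaptureMetricsFn wraps w and hands the wrapped writer to the callback.
	m := httpsnoop.CaptureMetricsFn(w, func(ww http.ResponseWriter) {
		ww.WriteHeader(http.StatusTeapot)
		ww.Write([]byte("short and stout"))
	})

	fmt.Println(m.Code, m.Written) // 418 15
}
```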
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/docs.go b/hack/tools/vendor/github.com/felixge/httpsnoop/docs.go
new file mode 100644
index 0000000000..203c35b3c6
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/docs.go
@@ -0,0 +1,10 @@
+// Package httpsnoop provides an easy way to capture http related metrics (i.e.
+// response time, bytes written, and http status code) from your application's
+// http.Handlers.
+//
+// Doing this requires non-trivial wrapping of the http.ResponseWriter
+// interface, which is also exposed for users interested in a more low-level
+// API.
+package httpsnoop
+
+//go:generate go run codegen/main.go
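Before the generated wrapper below, a minimal sketch of the lower-level `Wrap`/`Hooks` API it implements, here intercepting only `WriteHeader`:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	var code int
	hooks := httpsnoop.Hooks{
		// Middleware for the WriteHeader call: record the code, then delegate.
		WriteHeader: func(next httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
			return func(c int) {
				code = c
				next(c)
			}
		},
	}

	w := httptest.NewRecorder()
	ww := httpsnoop.Wrap(w, hooks) // implements the same interface set as w
	ww.WriteHeader(http.StatusNoContent)

	fmt.Println(code) // 204
}
```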
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
new file mode 100644
index 0000000000..101cedde67
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -0,0 +1,436 @@
+// +build go1.8
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
+
+package httpsnoop
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// HeaderFunc is part of the http.ResponseWriter interface.
+type HeaderFunc func() http.Header
+
+// WriteHeaderFunc is part of the http.ResponseWriter interface.
+type WriteHeaderFunc func(code int)
+
+// WriteFunc is part of the http.ResponseWriter interface.
+type WriteFunc func(b []byte) (int, error)
+
+// FlushFunc is part of the http.Flusher interface.
+type FlushFunc func()
+
+// CloseNotifyFunc is part of the http.CloseNotifier interface.
+type CloseNotifyFunc func() <-chan bool
+
+// HijackFunc is part of the http.Hijacker interface.
+type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
+
+// ReadFromFunc is part of the io.ReaderFrom interface.
+type ReadFromFunc func(src io.Reader) (int64, error)
+
+// PushFunc is part of the http.Pusher interface.
+type PushFunc func(target string, opts *http.PushOptions) error
+
+// Hooks defines a set of method interceptors for methods included in
+// http.ResponseWriter as well as some others. You can think of them as
+// middleware for the function calls they target. See Wrap for more details.
+type Hooks struct {
+ Header func(HeaderFunc) HeaderFunc
+ WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
+ Write func(WriteFunc) WriteFunc
+ Flush func(FlushFunc) FlushFunc
+ CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
+ Hijack func(HijackFunc) HijackFunc
+ ReadFrom func(ReadFromFunc) ReadFromFunc
+ Push func(PushFunc) PushFunc
+}
+
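+// An illustrative sketch (not part of the generated file): a Hooks value that
+// counts the bytes flowing through Write, assuming w is an existing
+// http.ResponseWriter, could look like:
+//
+//	var written int64
+//	hooks := Hooks{
+//		Write: func(next WriteFunc) WriteFunc {
+//			return func(p []byte) (int, error) {
+//				n, err := next(p)
+//				written += int64(n)
+//				return n, err
+//			}
+//		},
+//	}
+//	ww := Wrap(w, hooks) // use ww in place of w from here on
+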
+// Wrap returns a wrapped version of w that provides the exact same interface
+// as w. Specifically if w implements any combination of:
+//
+// - http.Flusher
+// - http.CloseNotifier
+// - http.Hijacker
+// - io.ReaderFrom
+// - http.Pusher
+//
+// The wrapped version will implement the exact same combination. If no hooks
+// are set, the wrapped version also behaves exactly as w. Hooks targeting
+// methods not supported by w are ignored. Any other hooks will intercept the
+// method they target and may modify the call's arguments and/or return values.
+// The CaptureMetrics implementation serves as a working example for how the
+// hooks can be used.
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
+ rw := &rw{w: w, h: hooks}
+ _, i0 := w.(http.Flusher)
+ _, i1 := w.(http.CloseNotifier)
+ _, i2 := w.(http.Hijacker)
+ _, i3 := w.(io.ReaderFrom)
+ _, i4 := w.(http.Pusher)
+ switch {
+ // combination 1/32
+ case !i0 && !i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ }{rw, rw}
+ // combination 2/32
+ case !i0 && !i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Pusher
+ }{rw, rw, rw}
+ // combination 3/32
+ case !i0 && !i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 4/32
+ case !i0 && !i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 5/32
+ case !i0 && !i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 6/32
+ case !i0 && !i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 7/32
+ case !i0 && !i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 8/32
+ case !i0 && !i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 9/32
+ case !i0 && i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ }{rw, rw, rw}
+ // combination 10/32
+ case !i0 && i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 11/32
+ case !i0 && i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 12/32
+ case !i0 && i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 13/32
+ case !i0 && i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 14/32
+ case !i0 && i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 15/32
+ case !i0 && i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 16/32
+ case !i0 && i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 17/32
+ case i0 && !i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ }{rw, rw, rw}
+ // combination 18/32
+ case i0 && !i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 19/32
+ case i0 && !i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 20/32
+ case i0 && !i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 21/32
+ case i0 && !i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 22/32
+ case i0 && !i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 23/32
+ case i0 && !i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 24/32
+ case i0 && !i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 25/32
+ case i0 && i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ }{rw, rw, rw, rw}
+ // combination 26/32
+ case i0 && i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 27/32
+ case i0 && i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 28/32
+ case i0 && i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 29/32
+ case i0 && i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw, rw}
+ // combination 30/32
+ case i0 && i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 31/32
+ case i0 && i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 32/32
+ case i0 && i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw, rw}
+ }
+ panic("unreachable")
+}
+
+type rw struct {
+ w http.ResponseWriter
+ h Hooks
+}
+
+func (w *rw) Unwrap() http.ResponseWriter {
+ return w.w
+}
+
+func (w *rw) Header() http.Header {
+ f := w.w.(http.ResponseWriter).Header
+ if w.h.Header != nil {
+ f = w.h.Header(f)
+ }
+ return f()
+}
+
+func (w *rw) WriteHeader(code int) {
+ f := w.w.(http.ResponseWriter).WriteHeader
+ if w.h.WriteHeader != nil {
+ f = w.h.WriteHeader(f)
+ }
+ f(code)
+}
+
+func (w *rw) Write(b []byte) (int, error) {
+ f := w.w.(http.ResponseWriter).Write
+ if w.h.Write != nil {
+ f = w.h.Write(f)
+ }
+ return f(b)
+}
+
+func (w *rw) Flush() {
+ f := w.w.(http.Flusher).Flush
+ if w.h.Flush != nil {
+ f = w.h.Flush(f)
+ }
+ f()
+}
+
+func (w *rw) CloseNotify() <-chan bool {
+ f := w.w.(http.CloseNotifier).CloseNotify
+ if w.h.CloseNotify != nil {
+ f = w.h.CloseNotify(f)
+ }
+ return f()
+}
+
+func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ f := w.w.(http.Hijacker).Hijack
+ if w.h.Hijack != nil {
+ f = w.h.Hijack(f)
+ }
+ return f()
+}
+
+func (w *rw) ReadFrom(src io.Reader) (int64, error) {
+ f := w.w.(io.ReaderFrom).ReadFrom
+ if w.h.ReadFrom != nil {
+ f = w.h.ReadFrom(f)
+ }
+ return f(src)
+}
+
+func (w *rw) Push(target string, opts *http.PushOptions) error {
+ f := w.w.(http.Pusher).Push
+ if w.h.Push != nil {
+ f = w.h.Push(f)
+ }
+ return f(target, opts)
+}
+
+type Unwrapper interface {
+ Unwrap() http.ResponseWriter
+}
+
+// Unwrap returns the underlying http.ResponseWriter from within zero or more
+// layers of httpsnoop wrappers.
+func Unwrap(w http.ResponseWriter) http.ResponseWriter {
+ if rw, ok := w.(Unwrapper); ok {
+ // recurse until rw.Unwrap() returns a non-Unwrapper
+ return Unwrap(rw.Unwrap())
+ } else {
+ return w
+ }
+}
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
new file mode 100644
index 0000000000..e0951df152
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -0,0 +1,278 @@
+// +build !go1.8
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
+
+package httpsnoop
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// HeaderFunc is part of the http.ResponseWriter interface.
+type HeaderFunc func() http.Header
+
+// WriteHeaderFunc is part of the http.ResponseWriter interface.
+type WriteHeaderFunc func(code int)
+
+// WriteFunc is part of the http.ResponseWriter interface.
+type WriteFunc func(b []byte) (int, error)
+
+// FlushFunc is part of the http.Flusher interface.
+type FlushFunc func()
+
+// CloseNotifyFunc is part of the http.CloseNotifier interface.
+type CloseNotifyFunc func() <-chan bool
+
+// HijackFunc is part of the http.Hijacker interface.
+type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
+
+// ReadFromFunc is part of the io.ReaderFrom interface.
+type ReadFromFunc func(src io.Reader) (int64, error)
+
+// Hooks defines a set of method interceptors for methods included in
+// http.ResponseWriter as well as some others. You can think of them as
+// middleware for the function calls they target. See Wrap for more details.
+type Hooks struct {
+ Header func(HeaderFunc) HeaderFunc
+ WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
+ Write func(WriteFunc) WriteFunc
+ Flush func(FlushFunc) FlushFunc
+ CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
+ Hijack func(HijackFunc) HijackFunc
+ ReadFrom func(ReadFromFunc) ReadFromFunc
+}
+
+// Wrap returns a wrapped version of w that provides the exact same interface
+// as w. Specifically if w implements any combination of:
+//
+// - http.Flusher
+// - http.CloseNotifier
+// - http.Hijacker
+// - io.ReaderFrom
+//
+// The wrapped version will implement the exact same combination. If no hooks
+// are set, the wrapped version also behaves exactly as w. Hooks targeting
+// methods not supported by w are ignored. Any other hooks will intercept the
+// method they target and may modify the call's arguments and/or return values.
+// The CaptureMetrics implementation serves as a working example for how the
+// hooks can be used.
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
+ rw := &rw{w: w, h: hooks}
+ _, i0 := w.(http.Flusher)
+ _, i1 := w.(http.CloseNotifier)
+ _, i2 := w.(http.Hijacker)
+ _, i3 := w.(io.ReaderFrom)
+ switch {
+ // combination 1/16
+ case !i0 && !i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ }{rw, rw}
+ // combination 2/16
+ case !i0 && !i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 3/16
+ case !i0 && !i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 4/16
+ case !i0 && !i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 5/16
+ case !i0 && i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ }{rw, rw, rw}
+ // combination 6/16
+ case !i0 && i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 7/16
+ case !i0 && i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 8/16
+ case !i0 && i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 9/16
+ case i0 && !i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ }{rw, rw, rw}
+ // combination 10/16
+ case i0 && !i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 11/16
+ case i0 && !i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 12/16
+ case i0 && !i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 13/16
+ case i0 && i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ }{rw, rw, rw, rw}
+ // combination 14/16
+ case i0 && i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 15/16
+ case i0 && i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw, rw}
+ // combination 16/16
+ case i0 && i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw, rw}
+ }
+ panic("unreachable")
+}
+
+type rw struct {
+ w http.ResponseWriter
+ h Hooks
+}
+
+func (w *rw) Unwrap() http.ResponseWriter {
+ return w.w
+}
+
+func (w *rw) Header() http.Header {
+ f := w.w.(http.ResponseWriter).Header
+ if w.h.Header != nil {
+ f = w.h.Header(f)
+ }
+ return f()
+}
+
+func (w *rw) WriteHeader(code int) {
+ f := w.w.(http.ResponseWriter).WriteHeader
+ if w.h.WriteHeader != nil {
+ f = w.h.WriteHeader(f)
+ }
+ f(code)
+}
+
+func (w *rw) Write(b []byte) (int, error) {
+ f := w.w.(http.ResponseWriter).Write
+ if w.h.Write != nil {
+ f = w.h.Write(f)
+ }
+ return f(b)
+}
+
+func (w *rw) Flush() {
+ f := w.w.(http.Flusher).Flush
+ if w.h.Flush != nil {
+ f = w.h.Flush(f)
+ }
+ f()
+}
+
+func (w *rw) CloseNotify() <-chan bool {
+ f := w.w.(http.CloseNotifier).CloseNotify
+ if w.h.CloseNotify != nil {
+ f = w.h.CloseNotify(f)
+ }
+ return f()
+}
+
+func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ f := w.w.(http.Hijacker).Hijack
+ if w.h.Hijack != nil {
+ f = w.h.Hijack(f)
+ }
+ return f()
+}
+
+func (w *rw) ReadFrom(src io.Reader) (int64, error) {
+ f := w.w.(io.ReaderFrom).ReadFrom
+ if w.h.ReadFrom != nil {
+ f = w.h.ReadFrom(f)
+ }
+ return f(src)
+}
+
+type Unwrapper interface {
+ Unwrap() http.ResponseWriter
+}
+
+// Unwrap returns the underlying http.ResponseWriter from within zero or more
+// layers of httpsnoop wrappers.
+func Unwrap(w http.ResponseWriter) http.ResponseWriter {
+ if rw, ok := w.(Unwrapper); ok {
+ // recurse until rw.Unwrap() returns a non-Unwrapper
+ return Unwrap(rw.Unwrap())
+ } else {
+ return w
+ }
+}
diff --git a/hack/tools/vendor/github.com/go-logr/logr/funcr/funcr.go b/hack/tools/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 0000000000..30568e768d
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,914 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function. See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged. When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+ return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+ fnWrapper := func(_, obj string) {
+ fn(obj)
+ }
+ return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
+
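+// An illustrative usage sketch (assuming output to stdout via fmt):
+//
+//	log := New(func(prefix, args string) {
+//		fmt.Println(prefix, args)
+//	}, Options{LogTimestamp: true})
+//	log.Info("hello", "user", "alice")
+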
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+ GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+ l := &fnlogger{
+ Formatter: formatter,
+ write: fn,
+ }
+ // For skipping fnlogger.Info and fnlogger.Error.
+ l.Formatter.AddCallDepth(1)
+ return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // LogCaller tells funcr to add a "caller" key to some or all log lines.
+ // This has some overhead, so some users might not want it.
+ LogCaller MessageClass
+
+ // LogCallerFunc tells funcr to also log the calling function name. This
+ // has no effect if caller logging is not enabled (see Options.LogCaller).
+ LogCallerFunc bool
+
+ // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
+ // overhead, so some users might not want it.
+ LogTimestamp bool
+
+ // TimestampFormat tells funcr how to render timestamps when LogTimestamp
+ // is enabled. If not specified, a default format will be used. For more
+ // details, see docs for Go's time.Layout.
+ TimestampFormat string
+
+ // LogInfoLevel tells funcr what key to use to log the info level.
+ // If not specified, the info level will be logged as "level".
+ // If this is set to "", the info level will not be logged at all.
+ LogInfoLevel *string
+
+ // Verbosity tells funcr which V logs to produce. Higher values enable
+ // more logs. Info logs at or below this level will be written, while logs
+ // above this level will be discarded.
+ Verbosity int
+
+ // RenderBuiltinsHook allows users to mutate the list of key-value pairs
+ // while a log line is being rendered. The kvList argument follows logr
+ // conventions - each pair of slice elements is comprised of a string key
+ // and an arbitrary value (verified and sanitized before calling this
+ // hook). The value returned must follow the same conventions. This hook
+ // can be used to audit or modify logged data. For example, you might want
+ // to prefix all of funcr's built-in keys with some string. This hook is
+ // only called for built-in (provided by funcr itself) key-value pairs.
+ // Equivalent hooks are offered for key-value pairs saved via
+ // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+ // for user-provided pairs (see RenderArgsHook).
+ RenderBuiltinsHook func(kvList []any) []any
+
+ // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+ // only called for key-value pairs saved via logr.Logger.WithValues. See
+ // RenderBuiltinsHook for more details.
+ RenderValuesHook func(kvList []any) []any
+
+ // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+ // called for key-value pairs passed directly to Info and Error. See
+ // RenderBuiltinsHook for more details.
+ RenderArgsHook func(kvList []any) []any
+
+ // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+ // that contains a struct, etc.) it may log. Every time it finds a struct,
+ // slice, array, or map the depth is increased by one. When the maximum is
+ // reached, the value will be converted to a string indicating that the max
+ // depth has been exceeded. If this field is not specified, a default
+ // value will be used.
+ MaxLogDepth int
+}
+
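+// An illustrative sketch: a RenderBuiltinsHook that prefixes every built-in
+// key (the "my." prefix is an arbitrary example) might look like:
+//
+//	opts := Options{
+//		RenderBuiltinsHook: func(kvList []any) []any {
+//			for i := 0; i < len(kvList); i += 2 {
+//				if k, ok := kvList[i].(string); ok {
+//					kvList[i] = "my." + k
+//				}
+//			}
+//			return kvList
+//		},
+//	}
+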
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+ Formatter
+ write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+ return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+ return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+ return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+ if opts.TimestampFormat == "" {
+ opts.TimestampFormat = defaultTimestampFormat
+ }
+ if opts.MaxLogDepth == 0 {
+ opts.MaxLogDepth = defaultMaxLogDepth
+ }
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
+ f := Formatter{
+ outputFormat: outfmt,
+ prefix: "",
+ values: nil,
+ depth: 0,
+ opts: &opts,
+ }
+ return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
+type Formatter struct {
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ depth int
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+ // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+ outputKeyValue outputFormat = iota
+ // outputJSON emits strict JSON.
+ outputJSON
+)
+
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
+type PseudoStruct []any
+
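+// For example (illustrative), PseudoStruct{"name", "alice", "age", 30} is
+// rendered like a struct with fields "name" and "age" rather than as a
+// four-element list.
+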
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []any) string {
+ // Empirically bytes.Buffer is faster than strings.Builder for this.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('{') // for the whole record
+ }
+
+ // Render builtins
+ vals := builtins
+ if hook := f.opts.RenderBuiltinsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
+ continuing := len(builtins) > 0
+
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, true) // escape user-provided keys
+
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
+ }
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
+ }
+
+ if bodyStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(bodyStr)
+ }
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}') // for the whole record
+ }
+
+ return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
+ }
+
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
+ }
+
+ return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
+ // This logic overlaps with sanitize() but saves one type-cast per key,
+ // which can be measurable.
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ copied := false
+ for i := 0; i < len(kvList); i += 2 {
+ k, ok := kvList[i].(string)
+ if !ok {
+ if !copied {
+ newList := make([]any, len(kvList))
+ copy(newList, kvList)
+ kvList = newList
+ copied = true
+ }
+ k = f.nonStringKey(kvList[i])
+ kvList[i] = k
+ }
+ v := kvList[i+1]
+
+ if i > 0 {
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(f.comma())
+ } else {
+ // In theory the format could be something we don't understand. In
+ // practice, we control it, so it won't be.
+ buf.WriteByte(' ')
+ }
+ }
+
+ buf.WriteString(f.quoted(k, escapeKeys))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.pretty(v))
+ }
+ return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+ if escape {
+ return prettyString(str)
+ }
+ // this is faster
+ return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+ if f.outputFormat == outputJSON {
+ return ','
+ }
+ return ' '
+}
+
+func (f Formatter) colon() byte {
+ if f.outputFormat == outputJSON {
+ return ':'
+ }
+ return '='
+}
+
+func (f Formatter) pretty(value any) string {
+ return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+ flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `"<max-log-depth-exceeded>"`
+ }
+
+ // Handle types that take full control of logging.
+ if v, ok := value.(logr.Marshaler); ok {
+ // Replace the value with what the type wants to get logged.
+ // That then gets handled below via reflection.
+ value = invokeMarshaler(v)
+ }
+
+ // Handle types that want to format themselves.
+ switch v := value.(type) {
+ case fmt.Stringer:
+ value = invokeStringer(v)
+ case error:
+ value = invokeError(v)
+ }
+
+ // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case string:
+ return prettyString(v)
+ case int:
+ return strconv.FormatInt(int64(v), 10)
+ case int8:
+ return strconv.FormatInt(int64(v), 10)
+ case int16:
+ return strconv.FormatInt(int64(v), 10)
+ case int32:
+ return strconv.FormatInt(int64(v), 10)
+ case int64:
+ return strconv.FormatInt(int64(v), 10)
+ case uint:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case uintptr:
+ return strconv.FormatUint(uint64(v), 10)
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ case complex64:
+ return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+ case complex128:
+ return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+ case PseudoStruct:
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ v = f.sanitize(v)
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ for i := 0; i < len(v); i += 2 {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ k, _ := v[i].(string) // sanitize() above means no need to check success
+ // arbitrary keys might need escaping
+ buf.WriteString(prettyString(k))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 256))
+ t := reflect.TypeOf(value)
+ if t == nil {
+ return "null"
+ }
+ v := reflect.ValueOf(value)
+ switch t.Kind() {
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool())
+ case reflect.String:
+ return prettyString(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case reflect.Float32:
+ return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.Complex64:
+ return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+ case reflect.Complex128:
+ return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+ case reflect.Struct:
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+ // reflect says this field is only defined for non-exported fields.
+ continue
+ }
+ if !v.Field(i).CanInterface() {
+ // reflect isn't clear exactly what this means, but we can't use it.
+ continue
+ }
+ name := ""
+ omitempty := false
+ if tag, found := fld.Tag.Lookup("json"); found {
+ if tag == "-" {
+ continue
+ }
+ if comma := strings.Index(tag, ","); comma != -1 {
+ if n := tag[:comma]; n != "" {
+ name = n
+ }
+ rest := tag[comma:]
+ if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+ omitempty = true
+ }
+ } else {
+ name = tag
+ }
+ }
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+ if printComma {
+ buf.WriteByte(f.comma())
+ }
+ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+ }
+ if name == "" {
+ name = fld.Name
+ }
+ // field names can't contain characters which need escaping
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
+ // If this is outputting as JSON, make sure this isn't really a json.RawMessage.
+ // If so, just emit it as-is and don't pretty-print it, as that would render it
+ // as [X,Y,Z,...], which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ e := v.Index(i)
+ buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+ }
+ buf.WriteByte(']')
+ return buf.String()
+ case reflect.Map:
+ buf.WriteByte('{')
+ // This does not sort the map keys, for best perf.
+ it := v.MapRange()
+ i := 0
+ for it.Next() {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ // If a map key supports TextMarshaler, use it.
+ keystr := ""
+ if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+ txt, err := m.MarshalText()
+ if err != nil {
+ keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+ } else {
+ keystr = string(txt)
+ }
+ keystr = prettyString(keystr)
+ } else {
+ // prettyWithFlags will produce already-escaped values
+ keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+ if t.Key().Kind() != reflect.String {
+ // JSON only does string keys. Unlike Go's standard JSON, we'll
+ // convert just about anything to a string.
+ keystr = prettyString(keystr)
+ }
+ }
+ buf.WriteString(keystr)
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+ i++
+ }
+ buf.WriteByte('}')
+ return buf.String()
+ case reflect.Ptr, reflect.Interface:
+ if v.IsNil() {
+ return "null"
+ }
+ return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+ }
+ return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+ // Avoid escaping (which does allocations) if we can.
+ if needsEscape(s) {
+ return strconv.Quote(s)
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteByte('"')
+ b.WriteString(s)
+ b.WriteByte('"')
+ return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+ for _, r := range s {
+ if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmpty(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return s.String()
+}
+
+func invokeError(e error) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+ // File is the basename of the file for this call site.
+ File string `json:"file"`
+ // Line is the line number in the file for this call site.
+ Line int `json:"line"`
+ // Func is the function name for this call site, or empty if
+ // Options.LogCallerFunc is not enabled.
+ Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+ // +1 for this frame, +1 for Info/Error.
+ pc, file, line, ok := runtime.Caller(f.depth + 2)
+ if !ok {
+ return Caller{"", 0, ""}
+ }
+ fn := ""
+ if f.opts.LogCallerFunc {
+ if fp := runtime.FuncForPC(pc); fp != nil {
+ fn = fp.Name()
+ }
+ }
+
+ return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+ return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+ const snipLen = 16
+
+ snip := f.pretty(v)
+ if len(snip) > snipLen {
+ snip = snip[:snipLen]
+ }
+ return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ for i := 0; i < len(kvList); i += 2 {
+ _, ok := kvList[i].(string)
+ if !ok {
+ kvList[i] = f.nonStringKey(kvList[i])
+ }
+ }
+ return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(name string) {
+ // Unnamed groups are just inlined.
+ if name == "" {
+ return
+ }
+
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
+
+ // Start collecting new values.
+ f.groupName = name
+ f.valuesStr = ""
+ f.values = nil
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+ f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+ return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+ return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Info {
+ args = append(args, "caller", f.caller())
+ }
+ if key := *f.opts.LogInfoLevel; key != "" {
+ args = append(args, key, level)
+ }
+ args = append(args, "msg", msg)
+ return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Error {
+ args = append(args, "caller", f.caller())
+ }
+ args = append(args, "msg", msg)
+ var loggableErr any
+ if err != nil {
+ loggableErr = err.Error()
+ }
+ args = append(args, "error", loggableErr)
+ return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+ if len(f.prefix) > 0 {
+ f.prefix += "/"
+ }
+ f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []any) {
+ // Three slice args forces a copy.
+ n := len(f.values)
+ f.values = append(f.values[:n:n], kvList...)
+
+ vals := f.values
+ if hook := f.opts.RenderValuesHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+
+ // Pre-render values, so we don't have to do it on each Info/Error call.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ f.flatten(buf, vals, true) // escape user-provided keys
+ f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+ f.depth += depth
+}
diff --git a/hack/tools/vendor/github.com/go-logr/logr/funcr/slogsink.go b/hack/tools/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 0000000000..7bd84761e2
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, kvList)
+ return true
+ })
+
+ if record.Level >= slog.LevelError {
+ l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, kvList)
+ }
+ l.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+ l.startGroup(name)
+ return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, grpKVs)
+ }
+ if attr.Key == "" {
+ // slog says we have to inline these
+ kvList = append(kvList, grpKVs...)
+ } else {
+ kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+ }
+ } else if attr.Key != "" {
+ kvList = append(kvList, attr.Key, attrVal.Any())
+ }
+
+ return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/hack/tools/vendor/github.com/go-logr/stdr/LICENSE b/hack/tools/vendor/github.com/go-logr/stdr/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-logr/stdr/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/hack/tools/vendor/github.com/go-logr/stdr/README.md b/hack/tools/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 0000000000..5158667890
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/hack/tools/vendor/github.com/go-logr/stdr/stdr.go b/hack/tools/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 0000000000..93a8aab51b
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+ "log"
+ "os"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+ old := globalVerbosity
+ globalVerbosity = v
+ return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+ return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+ if std == nil {
+ // Go's log.Default() is only available in 1.16 and higher.
+ std = log.New(os.Stderr, "", log.LstdFlags)
+ }
+
+ if opts.Depth < 0 {
+ opts.Depth = 0
+ }
+
+ fopts := funcr.Options{
+ LogCaller: funcr.MessageClass(opts.LogCaller),
+ }
+
+ sl := &logger{
+ Formatter: funcr.NewFormatter(fopts),
+ std: std,
+ }
+
+ // For skipping our own logger.Info/Error.
+ sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+ return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // Depth biases the assumed number of call frames to the "true" caller.
+ // This is useful when the calling code calls a function which then calls
+ // stdr (e.g. a logging shim to another API). Values less than zero will
+ // be treated as zero.
+ Depth int
+
+ // LogCaller tells stdr to add a "caller" key to some or all log lines.
+ // Go's log package has options to log this natively, too.
+ LogCaller MessageClass
+
+ // TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+ // Output is the same as log.Output and log.Logger.Output.
+ Output(calldepth int, logline string) error
+}
+
+type logger struct {
+ funcr.Formatter
+ std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+ return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to test
+// type conversion.
+type Underlier interface {
+ GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+ return l.std
+}
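
The vendored adapter above is small enough to read end to end: `New` wraps any `StdLogger` (typically a `*log.Logger`), and `SetVerbosity` gates `V(n)` messages globally. A minimal usage sketch, using only the API shown in this file (the logger name and key/value pairs are illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// Route logr output through a standard-library logger.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags)).WithName("example")

	// V(1) messages are emitted only while the global verbosity is >= 1.
	stdr.SetVerbosity(1)
	logger.V(1).Info("starting", "port", 8080)
	logger.Error(os.ErrNotExist, "config not found", "path", "/etc/example.yaml")
}
```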
diff --git a/hack/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/hack/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 0000000000..22f8d21cca
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md b/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788aff1..0108f1d572 100644
--- a/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language
## Status
diff --git a/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go b/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go
index de60dc7dd2..d970c7cf44 100644
--- a/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -110,19 +110,39 @@ func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has no field %q", decodedToken)
+ }
- if rValue.Type().Implements(jsonPointableType) {
- r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
+ case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -170,7 +190,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
return node.(JSONSetable).JSONSet(decodedToken, data)
}
- switch rValue.Kind() {
+ switch rValue.Kind() { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -231,8 +251,7 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K
if err != nil {
return nil, knd, err
}
- node, kind = r, knd
-
+ node = r
}
rValue := reflect.ValueOf(node)
@@ -245,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
- return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+ return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {
@@ -284,7 +303,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
continue
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -405,11 +424,11 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
case json.Delim:
switch tk {
case '{':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
case '[':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
}
@@ -435,20 +454,21 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
if err != nil {
return 0, err
}
- switch tk := tk.(type) {
- case json.Delim:
- switch tk {
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
case '{':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
case '[':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
}
}
}
+
if !dec.More() {
return 0, fmt.Errorf("token reference %q not found", decodedToken)
}
@@ -456,27 +476,27 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
}
// drainSingle drains a single level of object or array.
-// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed.
+// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
func drainSingle(dec *json.Decoder) error {
for dec.More() {
tk, err := dec.Token()
if err != nil {
return err
}
- switch tk := tk.(type) {
- case json.Delim:
- switch tk {
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
case '{':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return err
}
case '[':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return err
}
}
}
}
+
// Consumes the ending delim
if _, err := dec.Token(); err != nil {
return err
@@ -498,14 +518,14 @@ const (
// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
- step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
- step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
return step2
}
// Escape escapes a pointer reference token string
func Escape(token string) string {
- step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
- step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
return step2
}
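
The `pointer.go` changes above tighten nil handling and switch to `strings.ReplaceAll` for RFC 6901 token escaping. A short sketch of the public surface this diff touches (`Escape`/`Unescape` and `Pointer.Get`); the document shape is made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// RFC 6901: "~" escapes to "~0" and "/" to "~1".
	fmt.Println(jsonpointer.Escape("a/b~c"))     // a~1b~0c
	fmt.Println(jsonpointer.Unescape("a~1b~0c")) // a/b~c

	doc := map[string]interface{}{
		"servers": []interface{}{
			map[string]interface{}{"url": "https://example.com"},
		},
	}
	p, err := jsonpointer.New("/servers/0/url")
	if err != nil {
		panic(err)
	}
	v, _, err := p.Get(doc) // also returns the reflect.Kind of the resolved node
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // https://example.com
}
```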
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/.gitignore b/hack/tools/vendor/github.com/go-openapi/swag/.gitignore
index d69b53accc..c4b1b64f04 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/.gitignore
+++ b/hack/tools/vendor/github.com/go-openapi/swag/.gitignore
@@ -2,3 +2,4 @@ secrets.yml
vendor
Godeps
.idea
+*.out
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml b/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml
index bf503e4000..80e2be0042 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -4,14 +4,14 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 25
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 3
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -20,35 +20,41 @@ linters:
- lll
- gochecknoinits
- gochecknoglobals
- - nlreturn
- - testpackage
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
- wrapcheck
+ - testpackage
+ - nlreturn
- gomnd
- - exhaustive
- exhaustivestruct
- goerr113
- - wsl
- - whitespace
- - gofumpt
- - godot
+ - errorlint
- nestif
- - godox
- - funlen
- - gci
- - gocognit
+ - godot
+ - gofumpt
- paralleltest
+ - tparallel
- thelper
- ifshort
- - gomoddirectives
- - cyclop
- - forcetypeassert
- - ireturn
- - tagliatelle
- - varnamelen
- - goimports
- - tenv
- - golint
- exhaustruct
- - nilnil
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
- nosnakecase
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md b/hack/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 0000000000..e7f28ed6b7
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+Roughly a 10x performance improvement, with about 100x fewer memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
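
For context, `-bench XXX -run XXX` works because the pattern `XXX` matches the `BenchmarkToXXXName` name while matching no tests. A sketch of what such a table-driven benchmark looks like — an assumed shape, not the upstream benchmark source:

```go
package swag_test

import (
	"testing"

	"github.com/go-openapi/swag"
)

// One sub-benchmark per name-mangling helper, mirroring the result
// names in the tables above.
func BenchmarkToXXXName(b *testing.B) {
	const input = "sample text with some spaces"
	for name, fn := range map[string]func(string) string{
		"ToGoName":         swag.ToGoName,
		"ToVarName":        swag.ToVarName,
		"ToFileName":       swag.ToFileName,
		"ToCommandName":    swag.ToCommandName,
		"ToHumanNameLower": swag.ToHumanNameLower,
		"ToHumanNameTitle": swag.ToHumanNameTitle,
	} {
		b.Run(name, func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				_ = fn(input)
			}
		})
	}
}
```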
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/README.md b/hack/tools/vendor/github.com/go-openapi/swag/README.md
index 217f6fa505..a729222998 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/README.md
+++ b/hack/tools/vendor/github.com/go-openapi/swag/README.md
@@ -1,7 +1,8 @@
-# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
-[](http://godoc.org/github.com/go-openapi/swag)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)
Contains a bunch of helper functions for go-openapi and go-swagger projects.
@@ -18,4 +19,5 @@ You may also use it standalone for your projects.
This repo has only a few dependencies outside of the standard library:
-* YAML utilities depend on gopkg.in/yaml.v2
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/initialism_index.go b/hack/tools/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 0000000000..20a359bb60
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms add additional initialisms
+func AddInitialisms(words ...string) {
+ for _, word := range words {
+ // commonInitialisms[upper(word)] = true
+ commonInitialisms.add(upper(word))
+ }
+ // sort again
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
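
`AddInitialisms` rebuilds the sorted index and the pre-baked rune caches, so registrations should happen before any mangling call: the pooled splitters capture the initialism table when first created. A sketch of the effect; the outputs are inferred from the rules in this file, not quoted from upstream tests:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Register custom initialisms up front, before any name mangling.
	swag.AddInitialisms("GRPC")

	fmt.Println(swag.ToGoName("grpc_endpoint"))        // GRPCEndpoint
	fmt.Println(swag.ToHumanNameLower("GRPCEndpoint")) // GRPC endpoint
}
```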
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/loading.go b/hack/tools/vendor/github.com/go-openapi/swag/loading.go
index 00038c3773..783442fddf 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/loading.go
+++ b/hack/tools/vendor/github.com/go-openapi/swag/loading.go
@@ -21,6 +21,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
var LoadHTTPCustomHeaders = map[string]string{}
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
-func LoadFromFileOrHTTP(path string) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
-func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
}
-// LoadStrategy returns a loader function for a given path or uri
-func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
- if strings.HasPrefix(path, "http") {
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote loader for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file` becomes `/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify a UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes a UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
return remote
}
- return func(pth string) ([]byte, error) {
- upth, err := pathUnescape(pth)
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
if err != nil {
return nil, err
}
- if strings.HasPrefix(pth, `file://`) {
- if runtime.GOOS == "windows" {
- // support for canonical file URIs on windows.
- // Zero tolerance here for dodgy URIs.
- u, _ := url.Parse(upth)
- if u.Host != "" {
- // assume UNC name (volume share)
- // file://host/share/folder\... ==> \\host\share\path\folder
- // NOTE: UNC port not yet supported
- upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
- } else {
- // file:///c:/folder/... ==> just remove the leading slash
- upth = strings.TrimPrefix(upth, `file:///`)
- }
- } else {
- upth = strings.TrimPrefix(upth, `file://`)
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
}
}
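
`LoadStrategy` itself only picks between the two callbacks; all of the `file://` handling above happens inside the returned closure before the local loader runs. A sketch of the dispatch with stand-in loaders (the error text is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	remote := func(p string) ([]byte, error) {
		return nil, fmt.Errorf("remote loading disabled: %s", p)
	}

	// http(s) paths dispatch to the remote loader...
	load := swag.LoadStrategy("https://example.com/spec.yaml", os.ReadFile, remote)
	_, err := load("https://example.com/spec.yaml")
	fmt.Println(err)

	// ...everything else (plain paths, file:// URIs) goes to the local one.
	load = swag.LoadStrategy("file:///tmp/spec.yaml", os.ReadFile, remote)
	if b, err := load("file:///tmp/spec.yaml"); err == nil {
		fmt.Println(len(b))
	}
}
```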
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go b/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go
index aa7f6a9bb8..8bb64ac32f 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go
+++ b/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -14,74 +14,80 @@
package swag
-import "unicode"
+import (
+ "unicode"
+ "unicode/utf8"
+)
type (
- nameLexem interface {
- GetUnsafeGoName() string
- GetOriginal() string
- IsInitialism() bool
- }
+ lexemKind uint8
- initialismNameLexem struct {
+ nameLexem struct {
original string
matchedInitialism string
+ kind lexemKind
}
+)
- casualNameLexem struct {
- original string
- }
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
)
-func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
- return &initialismNameLexem{
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
-func newCasualNameLexem(original string) *casualNameLexem {
- return &casualNameLexem{
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
original: original,
}
}
-func (l *initialismNameLexem) GetUnsafeGoName() string {
- return l.matchedInitialism
-}
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
-func (l *casualNameLexem) GetUnsafeGoName() string {
- var first rune
- var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
+
if i > 0 {
rest = l.original[i:]
break
}
}
+
if len(l.original) > 1 {
- return string(unicode.ToUpper(first)) + lower(rest)
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
}
return l.original
}
-func (l *initialismNameLexem) GetOriginal() string {
+func (l nameLexem) GetOriginal() string {
return l.original
}
-func (l *casualNameLexem) GetOriginal() string {
- return l.original
-}
-
-func (l *initialismNameLexem) IsInitialism() bool {
- return true
-}
-
-func (l *casualNameLexem) IsInitialism() bool {
- return false
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
}
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/post_go19.go b/hack/tools/vendor/github.com/go-openapi/swag/post_go19.go
deleted file mode 100644
index 7c7da9c088..0000000000
--- a/hack/tools/vendor/github.com/go-openapi/swag/post_go19.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Since go1.9, this may be implemented with sync.Map.
-type indexOfInitialisms struct {
- sortMutex *sync.Mutex
- index *sync.Map
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- sortMutex: new(sync.Mutex),
- index: new(sync.Map),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- for k, v := range initial {
- m.index.Store(k, v)
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- _, ok := m.index.Load(key)
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.index.Store(key, true)
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- m.index.Range(func(key, value interface{}) bool {
- k := key.(string)
- result = append(result, k)
- return true
- })
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/pre_go19.go b/hack/tools/vendor/github.com/go-openapi/swag/pre_go19.go
deleted file mode 100644
index 0565db377b..0000000000
--- a/hack/tools/vendor/github.com/go-openapi/swag/pre_go19.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Before go1.9, this may be implemented with a mutex on the map.
-type indexOfInitialisms struct {
- getMutex *sync.Mutex
- index map[string]bool
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- getMutex: new(sync.Mutex),
- index: make(map[string]bool, 50),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k, v := range initial {
- m.index[k] = v
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- _, ok := m.index[key]
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- m.index[key] = true
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k := range m.index {
- result = append(result, k)
- }
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/split.go b/hack/tools/vendor/github.com/go-openapi/swag/split.go
index a1825fb7dc..274727a866 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/split.go
+++ b/hack/tools/vendor/github.com/go-openapi/swag/split.go
@@ -15,124 +15,269 @@
package swag
import (
+ "bytes"
+ "sync"
"unicode"
+ "unicode/utf8"
)
-var nameReplaceTable = map[rune]string{
- '@': "At ",
- '&': "And ",
- '|': "Pipe ",
- '$': "Dollar ",
- '!': "Bang ",
- '-': "",
- '_': "",
-}
-
type (
splitter struct {
- postSplitInitialismCheck bool
initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
+ }
+
+ splitterOption func(*splitter)
+
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
+)
+
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
+
+ matchesPool struct {
+ *sync.Pool
}
- splitterOption func(*splitter) *splitter
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
)
-// split calls the splitter; splitter provides more control and post options
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
func split(str string) []string {
- lexems := newSplitter().split(str)
- result := make([]string, 0, len(lexems))
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
- for _, lexem := range lexems {
+ for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
return result
}
-func (s *splitter) split(str string) []nameLexem {
- return s.toNameLexems(str)
-}
-
-func newSplitter(options ...splitterOption) *splitter {
- splitter := &splitter{
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
- splitter = option(splitter)
+ option(&s)
}
- return splitter
+ return s
}
// withPostSplitInitialismCheck allows to catch initialisms after main split process
-func withPostSplitInitialismCheck(s *splitter) *splitter {
+func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
+}
+
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
return s
}
-type (
- initialismMatch struct {
- start, end int
- body []rune
- complete bool
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
}
- initialismMatches []*initialismMatch
-)
-func (s *splitter) toNameLexems(name string) []nameLexem {
+ return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ return poolOfLexems.BorrowLexems()
+ }
+
return s.mapMatchesToNameLexems(nameRunes, matches)
}
-func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
- matches := make(initialismMatches, 0)
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
- newMatches := make(initialismMatches, 0, len(matches))
+ // recycle these allocations as we loop over runes
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of O(n).
+ newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
- for _, match := range matches {
- if keepCompleteMatch := match.complete; keepCompleteMatch {
- newMatches = append(newMatches, match)
- continue
- }
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
+ }
- // drop failed match
- currentMatchRune := match.body[currentRunePosition-match.start]
- if !s.initialismRuneEqual(currentMatchRune, currentRune) {
- continue
- }
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
- // try to complete ongoing match
- if currentRunePosition-match.start == len(match.body)-1 {
- // we are close; the next step is to check the symbol ahead
- // if it is a small letter, then it is not the end of match
- // but beginning of the next word
-
- if currentRunePosition < len(nameRunes)-1 {
- nextRune := nameRunes[currentRunePosition+1]
- if newWord := unicode.IsLower(nextRune); newWord {
- // oh ok, it was the start of a new word
- continue
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
}
+
+ match.complete = true
+ match.end = currentRunePosition
}
- match.complete = true
- match.end = currentRunePosition
+ *newMatches = append(*newMatches, match)
}
-
- newMatches = append(newMatches, match)
}
// check for new initialism matches
- for _, initialism := range s.initialisms {
- initialismRunes := []rune(initialism)
- if s.initialismRuneEqual(initialismRunes[0], currentRune) {
- newMatches = append(newMatches, &initialismMatch{
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
matches = newMatches
}
+ // up to the caller to redeem this last slice
return matches
}
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
- nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
- var lastAcceptedMatch *initialismMatch
- for _, match := range matches {
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
if !match.complete {
continue
}
- if firstMatch := lastAcceptedMatch == nil; firstMatch {
- nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
- nameLexems = append(nameLexems, s.breakCasualString(middle)...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
- if lastAcceptedMatch == nil {
- return s.breakCasualString(nameRunes)
- }
-
- if lastAcceptedMatch.end+1 != len(nameRunes) {
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
- nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+ s.appendBrokenDownCasualString(nameLexems, rest)
}
- return nameLexems
-}
+ poolOfMatches.RedeemMatches(matches)
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
- return a == b
+ return nameLexems
}
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
- segments := make([]nameLexem, 0)
- currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
addCasualNameLexem := func(original string) {
- segments = append(segments, newCasualNameLexem(original))
+ *segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
- segments = append(segments, newInitialismNameLexem(original, match))
+ *segments = append(*segments, newInitialismNameLexem(original, match))
}
- addNameLexem := func(original string) {
- if s.postSplitInitialismCheck {
- for _, initialism := range s.initialisms {
- if upper(initialism) == upper(original) {
- addInitialismNameLexem(original, initialism)
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
return
}
}
- }
- addCasualNameLexem(original)
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
}
- for _, rn := range string(str) {
- if replace, found := nameReplaceTable[rn]; found {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
- currentSegment = ""
+ currentSegment.Reset()
}
- currentSegment += string(rn)
+ currentSegment.WriteRune(rn)
+ }
+
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired from strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ return len(base) == 0
+ }
+
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ i += size
}
- return segments
+ return true
}
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/string_bytes.go b/hack/tools/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 0000000000..90745d5ca9
--- /dev/null
+++ b/hack/tools/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
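
`hackStringBytes` yields a zero-copy `[]byte` view of a string; the slice aliases the string's immutable backing array, so callers must treat it as read-only (mutating it is undefined behavior). A tiny sketch of that contract:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "HTTP"
	// Same storage as s, no allocation; reads only, never writes.
	b := unsafe.Slice(unsafe.StringData(s), len(s))
	fmt.Println(len(b), b[0] == 'H') // 4 true
}
```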
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/util.go b/hack/tools/vendor/github.com/go-openapi/swag/util.go
index d971fbe34b..5051401c49 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/util.go
+++ b/hack/tools/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
+ "unicode/utf8"
)
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
-func init() {
- // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
- var configuredInitialisms = map[string]bool{
- "ACL": true,
- "API": true,
- "ASCII": true,
- "CPU": true,
- "CSS": true,
- "DNS": true,
- "EOF": true,
- "GUID": true,
- "HTML": true,
- "HTTPS": true,
- "HTTP": true,
- "ID": true,
- "IP": true,
- "IPv4": true,
- "IPv6": true,
- "JSON": true,
- "LHS": true,
- "OAI": true,
- "QPS": true,
- "RAM": true,
- "RHS": true,
- "RPC": true,
- "SLA": true,
- "SMTP": true,
- "SQL": true,
- "SSH": true,
- "TCP": true,
- "TLS": true,
- "TTL": true,
- "UDP": true,
- "UI": true,
- "UID": true,
- "UUID": true,
- "URI": true,
- "URL": true,
- "UTF8": true,
- "VM": true,
- "XML": true,
- "XMPP": true,
- "XSRF": true,
- "XSS": true,
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
}
- // a thread-safe index of initialisms
- commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
- initialisms = commonInitialisms.sorted()
-
- // a test function
- isInitialism = commonInitialisms.isInitialism
+ return GoNamePrefixFunc(name) + in
}
const (
@@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
-type byInitialism []string
-
-func (s byInitialism) Len() int {
- return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
- if len(s[i]) != len(s[j]) {
- return len(s[i]) < len(s[j])
- }
-
- return strings.Compare(s[i], s[j]) > 0
-}
-
// Removes leading and trailing whitespace
func trim(str string) string {
- return strings.Trim(str, " ")
+ return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
for pos, ru := range []rune(word) {
if pos > 0 {
- camelized += string(unicode.ToLower(ru))
+ camelized.WriteRune(unicode.ToLower(ru))
} else {
- camelized += string(unicode.ToUpper(ru))
+ camelized.WriteRune(unicode.ToUpper(ru))
}
}
- return
+ return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
- out := make([]string, 0, len(in))
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
- for _, w := range in {
+ for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
- out = append(out, w.GetOriginal())
+ out = append(out, trim(w.GetOriginal()))
}
}
+ poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
- out := make([]string, 0, len(in))
- for _, w := range in {
- original := w.GetOriginal()
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
+ poolOfLexems.RedeemLexems(in)
+
return strings.Join(out, " ")
}
@@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
- out = append(out, Camelize(w))
+ out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
- lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
+
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
+
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
+ // assume this is always the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
- result := ""
- for _, lexem := range lexems {
+ for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
- result += goName
+ result.WriteString(goName)
}
- if len(result) > 0 {
- // Only prefix with X when the first character isn't an ascii letter
- first := []rune(result)[0]
- if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
- if GoNamePrefixFunc == nil {
- return "X" + result
- }
- result = GoNamePrefixFunc(name) + result
- }
- first = []rune(result)[0]
- if unicode.IsLetter(first) && !unicode.IsUpper(first) {
- result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
- }
- }
-
- return result
+ return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -343,7 +323,7 @@ type zeroable interface {
func IsZero(data interface{}) bool {
v := reflect.ValueOf(data)
// check for nil data
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if v.IsNil() {
return true
@@ -356,7 +336,7 @@ func IsZero(data interface{}) bool {
}
// continue with slightly more complex reflection
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.String:
return v.Len() == 0
case reflect.Bool:
@@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
}
}
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
- for _, word := range words {
- // commonInitialisms[upper(word)] = true
- commonInitialisms.add(upper(word))
- }
- // sort again
- initialisms = commonInitialisms.sorted()
-}
-
// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
ShortDescription string
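
Reviewer note: the ToGoName/Camelize rewrite above swaps repeated string concatenation for builders borrowed from a pool (poolOfBuffers), with an ASCII fast path for the first character. A minimal, self-contained sketch of that pooling pattern — bufferPool and upperFirst are illustrative names, not the swag API:

package main

import (
	"fmt"
	"strings"
	"sync"
	"unicode"
	"unicode/utf8"
)

// bufferPool is a hypothetical stand-in for swag's poolOfBuffers.
var bufferPool = sync.Pool{
	New: func() any { return new(strings.Builder) },
}

// upperFirst upper-cases the first rune using a pooled builder,
// with an ASCII fast path that avoids rune decoding entirely.
func upperFirst(s string) string {
	if s == "" {
		return ""
	}
	b := bufferPool.Get().(*strings.Builder)
	defer func() {
		b.Reset()
		bufferPool.Put(b)
	}()

	if c := s[0]; c < utf8.RuneSelf && 'a' <= c && c <= 'z' {
		b.WriteByte(c - 'a' + 'A') // ASCII fast path
		b.WriteString(s[1:])
	} else {
		r, size := utf8.DecodeRuneInString(s)
		b.WriteRune(unicode.ToUpper(r))
		b.WriteString(s[size:])
	}
	return b.String()
}

func main() {
	fmt.Println(upperFirst("jsonName")) // JsonName
	fmt.Println(upperFirst("épreuve"))  // Épreuve
}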
diff --git a/hack/tools/vendor/github.com/go-openapi/swag/yaml.go b/hack/tools/vendor/github.com/go-openapi/swag/yaml.go
index f09ee609f3..f59e025932 100644
--- a/hack/tools/vendor/github.com/go-openapi/swag/yaml.go
+++ b/hack/tools/vendor/github.com/go-openapi/swag/yaml.go
@@ -16,8 +16,11 @@ package swag
import (
"encoding/json"
+ "errors"
"fmt"
"path/filepath"
+ "reflect"
+ "sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
- return nil, fmt.Errorf("only YAML documents that are objects are supported")
+ return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}
@@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
case yamlTimestamp:
return node.Value, nil
case yamlNull:
- return nil, nil
+ return nil, nil //nolint:nilnil
default:
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
}
@@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
- for k, v := range val {
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T", val)
}
- return nil, nil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
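
Reviewer note: sorting the keys before iterating makes json2yaml's output deterministic, since Go randomizes map iteration order. A standalone illustration of the same technique (not the swag API itself):

package main

import (
	"fmt"
	"sort"
)

// printSorted walks a map in stable key order; without the sort,
// Go's randomized map iteration would change the output between runs.
func printSorted(m map[string]interface{}) {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s: %v\n", k, m[k])
	}
}

func main() {
	printSorted(map[string]interface{}{"b": 2, "a": 1, "c": 3})
	// Always prints a, b, c in that order.
}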
diff --git a/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go
index 4245e5ad72..365a1d0477 100644
--- a/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go
+++ b/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go
@@ -14,7 +14,10 @@ import (
)
var (
- reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`)
+ reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`)
+ reXMLComments = regexp.MustCompile(`(?s)(<!--)(.*?)(-->)`)
+ reSpaces = regexp.MustCompile(`(?s)>\s+<`)
+ reNewlines = regexp.MustCompile(`\r*\n`)
// NL is the newline string used in XML output.
NL = "\n"
)
@@ -33,20 +36,19 @@ func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string
if len(nestedTagsInComments) > 0 {
nestedTagsInComment = nestedTagsInComments[0]
}
- reXmlComments := regexp.MustCompile(`(?s)(<!--)(.*?)(-->)`)
- src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><")
+ src := reSpaces.ReplaceAllString(xmls, "><")
if nestedTagsInComment {
- src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string {
- parts := reXmlComments.FindStringSubmatch(m)
- p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ")
+ src = reXMLComments.ReplaceAllStringFunc(src, func(m string) string {
+ parts := reXMLComments.FindStringSubmatch(m)
+ p2 := reNewlines.ReplaceAllString(parts[2], " ")
return parts[1] + html.EscapeString(p2) + parts[3]
})
}
rf := replaceTag(prefix, indent)
r := prefix + reg.ReplaceAllStringFunc(src, rf)
if nestedTagsInComment {
- r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string {
- parts := reXmlComments.FindStringSubmatch(m)
+ r = reXMLComments.ReplaceAllStringFunc(r, func(m string) string {
+ parts := reXMLComments.FindStringSubmatch(m)
return parts[1] + html.UnescapeString(parts[2]) + parts[3]
})
}
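
Reviewer note: hoisting the patterns to package-level vars compiles them once at init time instead of on every FormatXML call. A small sketch of the difference (illustrative names, not the xmlfmt API):

package main

import (
	"fmt"
	"regexp"
)

// Compiled once at package init, as the patched xmlfmt now does.
var reCollapse = regexp.MustCompile(`(?s)>\s+<`)

func collapsePrecompiled(s string) string {
	return reCollapse.ReplaceAllString(s, "><")
}

// collapsePerCall recompiles the pattern on every invocation, which is
// what the pre-patch code effectively did inside FormatXML.
func collapsePerCall(s string) string {
	return regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(s, "><")
}

func main() {
	in := "<a>\n  <b/>\n</a>"
	fmt.Println(collapsePrecompiled(in) == collapsePerCall(in)) // true
}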
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/.gitignore b/hack/tools/vendor/github.com/goccy/go-yaml/.gitignore
new file mode 100644
index 0000000000..e660fd93d3
--- /dev/null
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/.gitignore
@@ -0,0 +1 @@
+bin/
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/.golangci.yml b/hack/tools/vendor/github.com/goccy/go-yaml/.golangci.yml
new file mode 100644
index 0000000000..ec8d7313da
--- /dev/null
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/.golangci.yml
@@ -0,0 +1,42 @@
+run:
+ timeout: 5m
+
+linters-settings:
+ errcheck:
+ check-type-assertions: true
+ gci:
+ sections:
+ - "standard"
+ - "default"
+ - "prefix(github.com/goccy/go-yaml)"
+ - "blank"
+ - "dot"
+ gofmt:
+ simplify: true
+ govet:
+ disable:
+ - tests
+ misspell:
+ locale: US
+ staticcheck:
+ checks: ["all", "-ST1000", "-ST1005"]
+
+linters:
+ disable-all: true
+ enable:
+ - errcheck
+ - gci
+ - gofmt
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - staticcheck
+ - typecheck
+ - unused
+
+issues:
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - staticcheck
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/Makefile b/hack/tools/vendor/github.com/goccy/go-yaml/Makefile
index 1b1d92397c..02a38dd210 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/Makefile
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/Makefile
@@ -17,3 +17,31 @@ cover-html: cover
.PHONY: ycat/build
ycat/build:
go build -o ycat ./cmd/ycat
+
+.PHONY: lint
+lint: golangci-lint ## Run golangci-lint
+ @$(GOLANGCI_LINT) run
+
+.PHONY: fmt
+fmt: golangci-lint ## Ensure consistent code style
+ @go mod tidy
+ @go fmt ./...
+ @$(GOLANGCI_LINT) run --fix
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+$(LOCALBIN):
+ mkdir -p $(LOCALBIN)
+
+## Tool Binaries
+GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint
+
+## Tool Versions
+GOLANGCI_VERSION := 1.61.0
+
+.PHONY: golangci-lint
+.PHONY: $(GOLANGCI_LINT)
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
+$(GOLANGCI_LINT): $(LOCALBIN)
+ @test -s $(LOCALBIN)/golangci-lint && $(LOCALBIN)/golangci-lint version --format short | grep -q $(GOLANGCI_VERSION) || \
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(LOCALBIN) v$(GOLANGCI_VERSION)
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go b/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go
index b4d5ec4180..eb27588101 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go
@@ -1,6 +1,7 @@
package ast
import (
+ "errors"
"fmt"
"io"
"math"
@@ -8,13 +9,12 @@ import (
"strings"
"github.com/goccy/go-yaml/token"
- "golang.org/x/xerrors"
)
var (
- ErrInvalidTokenType = xerrors.New("invalid token type")
- ErrInvalidAnchorName = xerrors.New("invalid anchor name")
- ErrInvalidAliasName = xerrors.New("invalid alias name")
+ ErrInvalidTokenType = errors.New("invalid token type")
+ ErrInvalidAnchorName = errors.New("invalid anchor name")
+ ErrInvalidAliasName = errors.New("invalid alias name")
)
// NodeType type identifier of node
@@ -1491,7 +1491,7 @@ type SequenceNode struct {
// Replace replace value node.
func (n *SequenceNode) Replace(idx int, value Node) error {
if len(n.Values) <= idx {
- return xerrors.Errorf(
+ return fmt.Errorf(
"invalid index for sequence: sequence length is %d, but specified %d index",
len(n.Values), idx,
)
@@ -2092,10 +2092,10 @@ func Merge(dst Node, src Node) error {
err := &ErrInvalidMergeType{dst: dst, src: src}
switch dst.Type() {
case DocumentType:
- node := dst.(*DocumentNode)
+ node, _ := dst.(*DocumentNode)
return Merge(node.Body, src)
case MappingType:
- node := dst.(*MappingNode)
+ node, _ := dst.(*MappingNode)
target, ok := src.(*MappingNode)
if !ok {
return err
@@ -2103,7 +2103,7 @@ func Merge(dst Node, src Node) error {
node.Merge(target)
return nil
case SequenceType:
- node := dst.(*SequenceNode)
+ node, _ := dst.(*SequenceNode)
target, ok := src.(*SequenceNode)
if !ok {
return err
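
Reviewer note: swapping xerrors.New for the standard library's errors.New keeps sentinel comparison working through errors.Is, including across %w wrapping. A brief sketch under those assumptions:

package main

import (
	"errors"
	"fmt"
)

// Sentinel declared with the standard library, as ast now does.
var ErrInvalidTokenType = errors.New("invalid token type")

func parse(ok bool) error {
	if !ok {
		// %w keeps the sentinel reachable through the error chain.
		return fmt.Errorf("parse failed: %w", ErrInvalidTokenType)
	}
	return nil
}

func main() {
	err := parse(false)
	fmt.Println(errors.Is(err, ErrInvalidTokenType)) // true
}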
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/decode.go b/hack/tools/vendor/github.com/goccy/go-yaml/decode.go
index d3dbabcbc9..95f7217a83 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/decode.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/decode.go
@@ -7,7 +7,6 @@ import (
"encoding/base64"
"fmt"
"io"
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -20,7 +19,6 @@ import (
"github.com/goccy/go-yaml/internal/errors"
"github.com/goccy/go-yaml/parser"
"github.com/goccy/go-yaml/token"
- "golang.org/x/xerrors"
)
// Decoder reads and decodes YAML values from an input stream.
@@ -100,7 +98,7 @@ func (d *Decoder) castToFloat(v interface{}) interface{} {
func (d *Decoder) mergeValueNode(value ast.Node) ast.Node {
if value.Type() == ast.AliasType {
- aliasNode := value.(*ast.AliasNode)
+ aliasNode, _ := value.(*ast.AliasNode)
aliasName := aliasNode.Value.GetToken().Value
return d.anchorNodeMap[aliasName]
}
@@ -360,7 +358,7 @@ func (d *Decoder) resolveAlias(node ast.Node) (ast.Node, error) {
if err != nil {
return nil, err
}
- n.Values[idx] = value.(*ast.MappingValueNode)
+ n.Values[idx], _ = value.(*ast.MappingValueNode)
}
case *ast.TagNode:
value, err := d.resolveAlias(n.Value)
@@ -389,7 +387,7 @@ func (d *Decoder) resolveAlias(node ast.Node) (ast.Node, error) {
if err != nil {
return nil, err
}
- n.Key = key.(ast.MapKeyNode)
+ n.Key, _ = key.(ast.MapKeyNode)
value, err := d.resolveAlias(n.Value)
if err != nil {
return nil, err
@@ -408,7 +406,7 @@ func (d *Decoder) resolveAlias(node ast.Node) (ast.Node, error) {
aliasName := n.Value.GetToken().Value
node := d.anchorNodeMap[aliasName]
if node == nil {
- return nil, xerrors.Errorf("cannot find anchor by alias name %s", aliasName)
+ return nil, fmt.Errorf("cannot find anchor by alias name %s", aliasName)
}
return d.resolveAlias(node)
}
@@ -430,7 +428,7 @@ func (d *Decoder) getMapNode(node ast.Node) (ast.MapNode, error) {
aliasName := alias.Value.GetToken().Value
node := d.anchorNodeMap[aliasName]
if node == nil {
- return nil, xerrors.Errorf("cannot find anchor by alias name %s", aliasName)
+ return nil, fmt.Errorf("cannot find anchor by alias name %s", aliasName)
}
mapNode, ok := node.(ast.MapNode)
if ok {
@@ -461,7 +459,7 @@ func (d *Decoder) getArrayNode(node ast.Node) (ast.ArrayNode, error) {
aliasName := alias.Value.GetToken().Value
node := d.anchorNodeMap[aliasName]
if node == nil {
- return nil, xerrors.Errorf("cannot find anchor by alias name %s", aliasName)
+ return nil, fmt.Errorf("cannot find anchor by alias name %s", aliasName)
}
arrayNode, ok := node.(ast.ArrayNode)
if ok {
@@ -476,18 +474,24 @@ func (d *Decoder) getArrayNode(node ast.Node) (ast.ArrayNode, error) {
return arrayNode, nil
}
-func (d *Decoder) fileToNode(f *ast.File) ast.Node {
- for _, doc := range f.Docs {
- if v := d.nodeToValue(doc.Body); v != nil {
- return doc.Body
- }
- }
- return nil
-}
-
func (d *Decoder) convertValue(v reflect.Value, typ reflect.Type, src ast.Node) (reflect.Value, error) {
if typ.Kind() != reflect.String {
if !v.Type().ConvertibleTo(typ) {
+
+ // Special case for "strings -> floats" aka scientific notation
+ // If the destination type is a float and the source type is a string, check if we can
+ // use strconv.ParseFloat to convert the string to a float.
+ if (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) &&
+ v.Type().Kind() == reflect.String {
+ if f, err := strconv.ParseFloat(v.String(), 64); err == nil {
+ if typ.Kind() == reflect.Float32 {
+ return reflect.ValueOf(float32(f)), nil
+ } else if typ.Kind() == reflect.Float64 {
+ return reflect.ValueOf(f), nil
+ }
+ // else, fall through to the error below
+ }
+ }
return reflect.Zero(typ), errTypeMismatch(typ, v.Type(), src.GetToken())
}
return v.Convert(typ), nil
@@ -509,19 +513,6 @@ func (d *Decoder) convertValue(v reflect.Value, typ reflect.Type, src ast.Node)
return v.Convert(typ), nil
}
-type overflowError struct {
- dstType reflect.Type
- srcNum string
-}
-
-func (e *overflowError) Error() string {
- return fmt.Sprintf("cannot unmarshal %s into Go value of type %s ( overflow )", e.srcNum, e.dstType)
-}
-
-func errOverflow(dstType reflect.Type, num string) *overflowError {
- return &overflowError{dstType: dstType, srcNum: num}
-}
-
func errTypeMismatch(dstType, srcType reflect.Type, token *token.Token) *errors.TypeError {
return &errors.TypeError{DstType: dstType, SrcType: srcType, Token: token}
}
@@ -560,7 +551,7 @@ func (d *Decoder) deleteStructKeys(structType reflect.Type, unknownFields map[st
}
structFieldMap, err := structFieldMap(structType)
if err != nil {
- return errors.Wrapf(err, "failed to create struct field map")
+ return err
}
for j := 0; j < structType.NumField(); j++ {
@@ -575,7 +566,7 @@ func (d *Decoder) deleteStructKeys(structType reflect.Type, unknownFields map[st
}
if structField.IsInline {
- d.deleteStructKeys(field.Type, unknownFields)
+ _ = d.deleteStructKeys(field.Type, unknownFields)
} else {
delete(unknownFields, structField.RenderName)
}
@@ -698,10 +689,10 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr
if unmarshaler, exists := d.unmarshalerFromCustomUnmarshalerMap(ptrValue.Type()); exists {
b, err := d.unmarshalableDocument(src)
if err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
if err := unmarshaler(ptrValue.Interface(), b); err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
return nil
}
@@ -710,10 +701,10 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr
if unmarshaler, ok := iface.(BytesUnmarshalerContext); ok {
b, err := d.unmarshalableDocument(src)
if err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
if err := unmarshaler.UnmarshalYAML(ctx, b); err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
return nil
}
@@ -721,10 +712,10 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr
if unmarshaler, ok := iface.(BytesUnmarshaler); ok {
b, err := d.unmarshalableDocument(src)
if err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
if err := unmarshaler.UnmarshalYAML(b); err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
return nil
}
@@ -733,14 +724,14 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr
if err := unmarshaler.UnmarshalYAML(ctx, func(v interface{}) error {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Ptr {
- return errors.ErrDecodeRequiredPointerType
+ return ErrDecodeRequiredPointerType
}
if err := d.decodeValue(ctx, rv.Elem(), src); err != nil {
- return errors.Wrapf(err, "failed to decode value")
+ return err
}
return nil
}); err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
return nil
}
@@ -749,14 +740,14 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr
if err := unmarshaler.UnmarshalYAML(func(v interface{}) error {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Ptr {
- return errors.ErrDecodeRequiredPointerType
+ return ErrDecodeRequiredPointerType
}
if err := d.decodeValue(ctx, rv.Elem(), src); err != nil {
- return errors.Wrapf(err, "failed to decode value")
+ return err
}
return nil
}); err != nil {
- return errors.Wrapf(err, "failed to UnmarshalYAML")
+ return err
}
return nil
}
@@ -772,11 +763,11 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr
if unmarshaler, isText := iface.(encoding.TextUnmarshaler); isText {
b, ok, err := d.unmarshalableText(src)
if err != nil {
- return errors.Wrapf(err, "failed to UnmarshalText")
+ return err
}
if ok {
if err := unmarshaler.UnmarshalText(b); err != nil {
- return errors.Wrapf(err, "failed to UnmarshalText")
+ return err
}
return nil
}
@@ -786,21 +777,21 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr
if unmarshaler, ok := iface.(jsonUnmarshaler); ok {
b, err := d.unmarshalableDocument(src)
if err != nil {
- return errors.Wrapf(err, "failed to UnmarshalJSON")
+ return err
}
jsonBytes, err := YAMLToJSON(b)
if err != nil {
- return errors.Wrapf(err, "failed to convert yaml to json")
+ return err
}
jsonBytes = bytes.TrimRight(jsonBytes, "\n")
if err := unmarshaler.UnmarshalJSON(jsonBytes); err != nil {
- return errors.Wrapf(err, "failed to UnmarshalJSON")
+ return err
}
return nil
}
}
- return xerrors.Errorf("does not implemented Unmarshaler")
+ return fmt.Errorf("does not implemented Unmarshaler")
}
var (
@@ -816,7 +807,7 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No
}
if d.canDecodeByUnmarshaler(dst) {
if err := d.decodeByUnmarshaler(ctx, dst, src); err != nil {
- return errors.Wrapf(err, "failed to decode by unmarshaler")
+ return err
}
return nil
}
@@ -833,7 +824,7 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No
}
v := d.createDecodableValue(dst.Type())
if err := d.decodeValue(ctx, v, src); err != nil {
- return errors.Wrapf(err, "failed to decode ptr value")
+ return err
}
dst.Set(d.castToAssignableValue(v, dst.Type()))
case reflect.Interface:
@@ -877,10 +868,19 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No
dst.SetInt(int64(vv))
return nil
}
+ case string: // handle scientific notation
+ if i, err := strconv.ParseFloat(vv, 64); err == nil {
+ if 0 <= i && i <= math.MaxUint64 && !dst.OverflowInt(int64(i)) {
+ dst.SetInt(int64(i))
+ return nil
+ }
+ } else { // couldn't be parsed as float
+ return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken())
+ }
default:
return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken())
}
- return errOverflow(valueType, fmt.Sprint(v))
+ return errors.ErrOverflow(valueType, fmt.Sprint(v), src.GetToken())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
v := d.nodeToValue(src)
switch vv := v.(type) {
@@ -899,16 +899,26 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No
dst.SetUint(uint64(vv))
return nil
}
+ case string: // handle scientific notation
+ if i, err := strconv.ParseFloat(vv, 64); err == nil {
+ if 0 <= i && i <= math.MaxUint64 && !dst.OverflowUint(uint64(i)) {
+ dst.SetUint(uint64(i))
+ return nil
+ }
+ } else { // couldn't be parsed as float
+ return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken())
+ }
+
default:
return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken())
}
- return errOverflow(valueType, fmt.Sprint(v))
+ return errors.ErrOverflow(valueType, fmt.Sprint(v), src.GetToken())
}
v := reflect.ValueOf(d.nodeToValue(src))
if v.IsValid() {
convertedValue, err := d.convertValue(v, dst.Type(), src)
if err != nil {
- return errors.Wrapf(err, "failed to convert value")
+ return err
}
dst.Set(convertedValue)
}
@@ -955,18 +965,22 @@ func (d *Decoder) createDecodedNewValue(
return newValue, nil
}
}
+ var newValue reflect.Value
if node.Type() == ast.NullType {
- return reflect.Zero(typ), nil
+ newValue = reflect.New(typ).Elem()
+ } else {
+ newValue = d.createDecodableValue(typ)
}
- newValue := d.createDecodableValue(typ)
for defaultVal.Kind() == reflect.Ptr {
defaultVal = defaultVal.Elem()
}
if defaultVal.IsValid() && defaultVal.Type().AssignableTo(newValue.Type()) {
newValue.Set(defaultVal)
}
- if err := d.decodeValue(ctx, newValue, node); err != nil {
- return newValue, errors.Wrapf(err, "failed to decode value")
+ if node.Type() != ast.NullType {
+ if err := d.decodeValue(ctx, newValue, node); err != nil {
+ return newValue, err
+ }
}
return newValue, nil
}
@@ -974,7 +988,7 @@ func (d *Decoder) createDecodedNewValue(
func (d *Decoder) keyToNodeMap(node ast.Node, ignoreMergeKey bool, getKeyOrValueNode func(*ast.MapNodeIter) ast.Node) (map[string]ast.Node, error) {
mapNode, err := d.getMapNode(node)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get map node")
+ return nil, err
}
keyMap := map[string]struct{}{}
keyToNodeMap := map[string]ast.Node{}
@@ -990,21 +1004,21 @@ func (d *Decoder) keyToNodeMap(node ast.Node, ignoreMergeKey bool, getKeyOrValue
}
mergeMap, err := d.keyToNodeMap(mapIter.Value(), ignoreMergeKey, getKeyOrValueNode)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get keyToNodeMap by MergeKey node")
+ return nil, err
}
for k, v := range mergeMap {
if err := d.validateDuplicateKey(keyMap, k, v); err != nil {
- return nil, errors.Wrapf(err, "invalid struct key")
+ return nil, err
}
keyToNodeMap[k] = v
}
} else {
key, ok := d.nodeToValue(keyNode).(string)
if !ok {
- return nil, errors.Wrapf(err, "failed to decode map key")
+ return nil, err
}
if err := d.validateDuplicateKey(keyMap, key, keyNode); err != nil {
- return nil, errors.Wrapf(err, "invalid struct key")
+ return nil, err
}
keyToNodeMap[key] = getKeyOrValueNode(mapIter)
}
@@ -1015,7 +1029,7 @@ func (d *Decoder) keyToNodeMap(node ast.Node, ignoreMergeKey bool, getKeyOrValue
func (d *Decoder) keyToKeyNodeMap(node ast.Node, ignoreMergeKey bool) (map[string]ast.Node, error) {
m, err := d.keyToNodeMap(node, ignoreMergeKey, func(nodeMap *ast.MapNodeIter) ast.Node { return nodeMap.Key() })
if err != nil {
- return nil, errors.Wrapf(err, "failed to get keyToNodeMap")
+ return nil, err
}
return m, nil
}
@@ -1023,7 +1037,7 @@ func (d *Decoder) keyToKeyNodeMap(node ast.Node, ignoreMergeKey bool) (map[strin
func (d *Decoder) keyToValueNodeMap(node ast.Node, ignoreMergeKey bool) (map[string]ast.Node, error) {
m, err := d.keyToNodeMap(node, ignoreMergeKey, func(nodeMap *ast.MapNodeIter) ast.Node { return nodeMap.Value() })
if err != nil {
- return nil, errors.Wrapf(err, "failed to get keyToNodeMap")
+ return nil, err
}
return m, nil
}
@@ -1035,7 +1049,7 @@ func (d *Decoder) setDefaultValueIfConflicted(v reflect.Value, fieldMap StructFi
}
embeddedStructFieldMap, err := structFieldMap(typ)
if err != nil {
- return errors.Wrapf(err, "failed to get struct field map by embedded type")
+ return err
}
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
@@ -1090,7 +1104,7 @@ func (d *Decoder) castToTime(src ast.Node) (time.Time, error) {
func (d *Decoder) decodeTime(ctx context.Context, dst reflect.Value, src ast.Node) error {
t, err := d.castToTime(src)
if err != nil {
- return errors.Wrapf(err, "failed to convert to time")
+ return err
}
dst.Set(reflect.ValueOf(t))
return nil
@@ -1110,7 +1124,7 @@ func (d *Decoder) castToDuration(src ast.Node) (time.Duration, error) {
}
t, err := time.ParseDuration(s)
if err != nil {
- return 0, errors.Wrapf(err, "failed to parse duration")
+ return 0, err
}
return t, nil
}
@@ -1118,7 +1132,7 @@ func (d *Decoder) castToDuration(src ast.Node) (time.Duration, error) {
func (d *Decoder) decodeDuration(ctx context.Context, dst reflect.Value, src ast.Node) error {
t, err := d.castToDuration(src)
if err != nil {
- return errors.Wrapf(err, "failed to convert to duration")
+ return err
}
dst.Set(reflect.ValueOf(t))
return nil
@@ -1162,18 +1176,18 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
}
structFieldMap, err := structFieldMap(structType)
if err != nil {
- return errors.Wrapf(err, "failed to create struct field map")
+ return err
}
ignoreMergeKey := structFieldMap.hasMergeProperty()
keyToNodeMap, err := d.keyToValueNodeMap(src, ignoreMergeKey)
if err != nil {
- return errors.Wrapf(err, "failed to get keyToValueNodeMap")
+ return err
}
var unknownFields map[string]ast.Node
if d.disallowUnknownField {
unknownFields, err = d.keyToKeyNodeMap(src, ignoreMergeKey)
if err != nil {
- return errors.Wrapf(err, "failed to get keyToKeyNodeMap")
+ return err
}
}
@@ -1198,7 +1212,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
continue
}
if !fieldValue.CanSet() {
- return xerrors.Errorf("cannot set embedded type as unexported field %s.%s", field.PkgPath, field.Name)
+ return fmt.Errorf("cannot set embedded type as unexported field %s.%s", field.PkgPath, field.Name)
}
if fieldValue.Type().Kind() == reflect.Ptr && src.Type() == ast.NullType {
// set nil value to pointer
@@ -1213,7 +1227,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
newFieldValue, err := d.createDecodedNewValue(ctx, fieldValue.Type(), fieldValue, mapNode)
if d.disallowUnknownField {
if err := d.deleteStructKeys(fieldValue.Type(), unknownFields); err != nil {
- return errors.Wrapf(err, "cannot delete struct keys")
+ return err
}
}
@@ -1222,7 +1236,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
continue
}
var te *errors.TypeError
- if xerrors.As(err, &te) {
+ if errors.As(err, &te) {
if te.StructFieldName != nil {
fieldName := fmt.Sprintf("%s.%s", structType.Name(), *te.StructFieldName)
te.StructFieldName = &fieldName
@@ -1237,7 +1251,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
}
continue
}
- d.setDefaultValueIfConflicted(newFieldValue, structFieldMap)
+ _ = d.setDefaultValueIfConflicted(newFieldValue, structFieldMap)
fieldValue.Set(d.castToAssignableValue(newFieldValue, fieldValue.Type()))
continue
}
@@ -1258,7 +1272,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
continue
}
var te *errors.TypeError
- if xerrors.As(err, &te) {
+ if errors.As(err, &te) {
fieldName := fmt.Sprintf("%s.%s", structType.Name(), field.Name)
te.StructFieldName = &fieldName
foundErr = te
@@ -1270,7 +1284,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
fieldValue.Set(d.castToAssignableValue(newFieldValue, fieldValue.Type()))
}
if foundErr != nil {
- return errors.Wrapf(foundErr, "failed to decode value")
+ return foundErr
}
// Ignore unknown fields when parsing an inline struct (recognized by a nil token).
@@ -1315,7 +1329,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N
func (d *Decoder) decodeArray(ctx context.Context, dst reflect.Value, src ast.Node) error {
arrayNode, err := d.getArrayNode(src)
if err != nil {
- return errors.Wrapf(err, "failed to get array node")
+ return err
}
if arrayNode == nil {
return nil
@@ -1347,7 +1361,7 @@ func (d *Decoder) decodeArray(ctx context.Context, dst reflect.Value, src ast.No
}
dst.Set(arrayValue)
if foundErr != nil {
- return errors.Wrapf(foundErr, "failed to decode value")
+ return foundErr
}
return nil
}
@@ -1355,7 +1369,7 @@ func (d *Decoder) decodeArray(ctx context.Context, dst reflect.Value, src ast.No
func (d *Decoder) decodeSlice(ctx context.Context, dst reflect.Value, src ast.Node) error {
arrayNode, err := d.getArrayNode(src)
if err != nil {
- return errors.Wrapf(err, "failed to get array node")
+ return err
}
if arrayNode == nil {
return nil
@@ -1384,7 +1398,7 @@ func (d *Decoder) decodeSlice(ctx context.Context, dst reflect.Value, src ast.No
}
dst.Set(sliceValue)
if foundErr != nil {
- return errors.Wrapf(foundErr, "failed to decode value")
+ return foundErr
}
return nil
}
@@ -1392,7 +1406,7 @@ func (d *Decoder) decodeSlice(ctx context.Context, dst reflect.Value, src ast.No
func (d *Decoder) decodeMapItem(ctx context.Context, dst *MapItem, src ast.Node) error {
mapNode, err := d.getMapNode(src)
if err != nil {
- return errors.Wrapf(err, "failed to get map node")
+ return err
}
if mapNode == nil {
return nil
@@ -1405,7 +1419,7 @@ func (d *Decoder) decodeMapItem(ctx context.Context, dst *MapItem, src ast.Node)
value := mapIter.Value()
if key.Type() == ast.MergeKeyType {
if err := d.decodeMapItem(ctx, dst, value); err != nil {
- return errors.Wrapf(err, "failed to decode map with merge key")
+ return err
}
return nil
}
@@ -1433,7 +1447,7 @@ func (d *Decoder) validateDuplicateKey(keyMap map[string]struct{}, key interface
func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Node) error {
mapNode, err := d.getMapNode(src)
if err != nil {
- return errors.Wrapf(err, "failed to get map node")
+ return err
}
if mapNode == nil {
return nil
@@ -1447,11 +1461,11 @@ func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Nod
if key.Type() == ast.MergeKeyType {
var m MapSlice
if err := d.decodeMapSlice(ctx, &m, value); err != nil {
- return errors.Wrapf(err, "failed to decode map with merge key")
+ return err
}
for _, v := range m {
if err := d.validateDuplicateKey(keyMap, v.Key, value); err != nil {
- return errors.Wrapf(err, "invalid map key")
+ return err
}
mapSlice = append(mapSlice, v)
}
@@ -1459,7 +1473,7 @@ func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Nod
}
k := d.nodeToValue(key)
if err := d.validateDuplicateKey(keyMap, k, key); err != nil {
- return errors.Wrapf(err, "invalid map key")
+ return err
}
mapSlice = append(mapSlice, MapItem{
Key: k,
@@ -1473,7 +1487,7 @@ func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Nod
func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node) error {
mapNode, err := d.getMapNode(src)
if err != nil {
- return errors.Wrapf(err, "failed to get map node")
+ return err
}
if mapNode == nil {
return nil
@@ -1490,24 +1504,33 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node
value := mapIter.Value()
if key.Type() == ast.MergeKeyType {
if err := d.decodeMap(ctx, dst, value); err != nil {
- return errors.Wrapf(err, "failed to decode map with merge key")
+ return err
}
iter := dst.MapRange()
for iter.Next() {
if err := d.validateDuplicateKey(keyMap, iter.Key(), value); err != nil {
- return errors.Wrapf(err, "invalid map key")
+ return err
}
mapValue.SetMapIndex(iter.Key(), iter.Value())
}
continue
}
- k := reflect.ValueOf(d.nodeToValue(key))
- if k.IsValid() && k.Type().ConvertibleTo(keyType) {
- k = k.Convert(keyType)
+
+ k := d.createDecodableValue(keyType)
+ if d.canDecodeByUnmarshaler(k) {
+ if err := d.decodeByUnmarshaler(ctx, k, key); err != nil {
+ return err
+ }
+ } else {
+ k = reflect.ValueOf(d.nodeToValue(key))
+ if k.IsValid() && k.Type().ConvertibleTo(keyType) {
+ k = k.Convert(keyType)
+ }
}
+
if k.IsValid() {
if err := d.validateDuplicateKey(keyMap, k.Interface(), key); err != nil {
- return errors.Wrapf(err, "invalid map key")
+ return err
}
}
if valueType.Kind() == reflect.Ptr && value.Type() == ast.NullType {
@@ -1530,7 +1553,7 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node
}
dst.Set(mapValue)
if foundErr != nil {
- return errors.Wrapf(foundErr, "failed to decode value")
+ return foundErr
}
return nil
}
@@ -1538,7 +1561,7 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node
func (d *Decoder) fileToReader(file string) (io.Reader, error) {
reader, err := os.Open(file)
if err != nil {
- return nil, errors.Wrapf(err, "failed to open file")
+ return nil, err
}
return reader, nil
}
@@ -1558,7 +1581,7 @@ func (d *Decoder) readersUnderDir(dir string) ([]io.Reader, error) {
pattern := fmt.Sprintf("%s/*", dir)
matches, err := filepath.Glob(pattern)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get files by %s", pattern)
+ return nil, err
}
readers := []io.Reader{}
for _, match := range matches {
@@ -1567,7 +1590,7 @@ func (d *Decoder) readersUnderDir(dir string) ([]io.Reader, error) {
}
reader, err := d.fileToReader(match)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get reader")
+ return nil, err
}
readers = append(readers, reader)
}
@@ -1576,18 +1599,18 @@ func (d *Decoder) readersUnderDir(dir string) ([]io.Reader, error) {
func (d *Decoder) readersUnderDirRecursive(dir string) ([]io.Reader, error) {
readers := []io.Reader{}
- if err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err := filepath.Walk(dir, func(path string, info os.FileInfo, _ error) error {
if !d.isYAMLFile(path) {
return nil
}
- reader, err := d.fileToReader(path)
- if err != nil {
- return errors.Wrapf(err, "failed to get reader")
+ reader, readerErr := d.fileToReader(path)
+ if readerErr != nil {
+ return readerErr
}
readers = append(readers, reader)
return nil
}); err != nil {
- return nil, errors.Wrapf(err, "interrupt walk in %s", dir)
+ return nil, err
}
return readers, nil
}
@@ -1595,13 +1618,13 @@ func (d *Decoder) readersUnderDirRecursive(dir string) ([]io.Reader, error) {
func (d *Decoder) resolveReference() error {
for _, opt := range d.opts {
if err := opt(d); err != nil {
- return errors.Wrapf(err, "failed to exec option")
+ return err
}
}
for _, file := range d.referenceFiles {
reader, err := d.fileToReader(file)
if err != nil {
- return errors.Wrapf(err, "failed to get reader")
+ return err
}
d.referenceReaders = append(d.referenceReaders, reader)
}
@@ -1609,26 +1632,26 @@ func (d *Decoder) resolveReference() error {
if !d.isRecursiveDir {
readers, err := d.readersUnderDir(dir)
if err != nil {
- return errors.Wrapf(err, "failed to get readers from under the %s", dir)
+ return err
}
d.referenceReaders = append(d.referenceReaders, readers...)
} else {
readers, err := d.readersUnderDirRecursive(dir)
if err != nil {
- return errors.Wrapf(err, "failed to get readers from under the %s", dir)
+ return err
}
d.referenceReaders = append(d.referenceReaders, readers...)
}
}
for _, reader := range d.referenceReaders {
- bytes, err := ioutil.ReadAll(reader)
+ bytes, err := io.ReadAll(reader)
if err != nil {
- return errors.Wrapf(err, "failed to read buffer")
+ return err
}
// assign new anchor definition to anchorMap
if _, err := d.parse(bytes); err != nil {
- return errors.Wrapf(err, "failed to decode")
+ return err
}
}
d.isResolvedReference = true
@@ -1642,7 +1665,7 @@ func (d *Decoder) parse(bytes []byte) (*ast.File, error) {
}
f, err := parser.ParseBytes(bytes, parseMode)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse yaml")
+ return nil, err
}
normalizedFile := &ast.File{}
for _, doc := range f.Docs {
@@ -1661,16 +1684,16 @@ func (d *Decoder) isInitialized() bool {
func (d *Decoder) decodeInit() error {
if !d.isResolvedReference {
if err := d.resolveReference(); err != nil {
- return errors.Wrapf(err, "failed to resolve reference")
+ return err
}
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, d.reader); err != nil {
- return errors.Wrapf(err, "failed to copy from reader")
+ return err
}
file, err := d.parse(buf.Bytes())
if err != nil {
- return errors.Wrapf(err, "failed to decode")
+ return err
}
d.parsedFile = file
return nil
@@ -1685,7 +1708,7 @@ func (d *Decoder) decode(ctx context.Context, v reflect.Value) error {
return nil
}
if err := d.decodeValue(ctx, v.Elem(), body); err != nil {
- return errors.Wrapf(err, "failed to decode value")
+ return err
}
d.streamIndex++
return nil
@@ -1705,25 +1728,25 @@ func (d *Decoder) Decode(v interface{}) error {
func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Ptr {
- return errors.ErrDecodeRequiredPointerType
+ return ErrDecodeRequiredPointerType
}
if d.isInitialized() {
if err := d.decode(ctx, rv); err != nil {
if err == io.EOF {
return err
}
- return errors.Wrapf(err, "failed to decode")
+ return err
}
return nil
}
if err := d.decodeInit(); err != nil {
- return errors.Wrapf(err, "failed to decodeInit")
+ return err
}
if err := d.decode(ctx, rv); err != nil {
if err == io.EOF {
return err
}
- return errors.Wrapf(err, "failed to decode")
+ return err
}
return nil
}
@@ -1737,17 +1760,17 @@ func (d *Decoder) DecodeFromNode(node ast.Node, v interface{}) error {
func (d *Decoder) DecodeFromNodeContext(ctx context.Context, node ast.Node, v interface{}) error {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Ptr {
- return errors.ErrDecodeRequiredPointerType
+ return ErrDecodeRequiredPointerType
}
if !d.isInitialized() {
if err := d.decodeInit(); err != nil {
- return errors.Wrapf(err, "failed to decodInit")
+ return err
}
}
// resolve references to the anchor on the same file
d.nodeToValue(node)
if err := d.decodeValue(ctx, rv.Elem(), node); err != nil {
- return errors.Wrapf(err, "failed to decode value")
+ return err
}
return nil
}
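
Reviewer note: the new string branches in decodeValue let scalars that YAML lexes as strings, such as "1e3", still land in numeric destinations by going through strconv.ParseFloat first. A standalone sketch of that conversion rule — toInt64 is a hypothetical helper, not the decoder's real entry point:

package main

import (
	"fmt"
	"strconv"
)

// toInt64 mirrors the decoder's fallback: parse the string as a float
// (which accepts scientific notation such as "1e3"), then narrow to
// int64 only when the value round-trips exactly.
func toInt64(s string) (int64, error) {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, fmt.Errorf("not a number: %q", s)
	}
	i := int64(f)
	if float64(i) != f {
		return 0, fmt.Errorf("%q does not fit an int64 exactly", s)
	}
	return i, nil
}

func main() {
	fmt.Println(toInt64("1e3")) // 1000 <nil>
	fmt.Println(toInt64("1.5")) // 0 and an error
}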
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/encode.go b/hack/tools/vendor/github.com/goccy/go-yaml/encode.go
index 3b9b298144..97c1fa5b7a 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/encode.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/encode.go
@@ -17,7 +17,6 @@ import (
"github.com/goccy/go-yaml/parser"
"github.com/goccy/go-yaml/printer"
"github.com/goccy/go-yaml/token"
- "golang.org/x/xerrors"
)
const (
@@ -84,19 +83,19 @@ func (e *Encoder) Encode(v interface{}) error {
func (e *Encoder) EncodeContext(ctx context.Context, v interface{}) error {
node, err := e.EncodeToNodeContext(ctx, v)
if err != nil {
- return errors.Wrapf(err, "failed to encode to node")
+ return err
}
if err := e.setCommentByCommentMap(node); err != nil {
- return errors.Wrapf(err, "failed to set comment by comment map")
+ return err
}
if !e.written {
e.written = true
} else {
// write document separator
- e.writer.Write([]byte("---\n"))
+ _, _ = e.writer.Write([]byte("---\n"))
}
var p printer.Printer
- e.writer.Write(p.PrintNode(node))
+ _, _ = e.writer.Write(p.PrintNode(node))
return nil
}
@@ -109,12 +108,12 @@ func (e *Encoder) EncodeToNode(v interface{}) (ast.Node, error) {
func (e *Encoder) EncodeToNodeContext(ctx context.Context, v interface{}) (ast.Node, error) {
for _, opt := range e.opts {
if err := opt(e); err != nil {
- return nil, errors.Wrapf(err, "failed to run option for encoder")
+ return nil, err
}
}
node, err := e.encodeValue(ctx, reflect.ValueOf(v), 1)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode value")
+ return nil, err
}
return node, nil
}
@@ -126,7 +125,7 @@ func (e *Encoder) setCommentByCommentMap(node ast.Node) error {
for path, comments := range e.commentMap {
n, err := path.FilterNode(node)
if err != nil {
- return errors.Wrapf(err, "failed to filter node")
+ return err
}
if n == nil {
continue
@@ -140,15 +139,15 @@ func (e *Encoder) setCommentByCommentMap(node ast.Node) error {
switch comment.Position {
case CommentHeadPosition:
if err := e.setHeadComment(node, n, commentGroup); err != nil {
- return errors.Wrapf(err, "failed to set head comment")
+ return err
}
case CommentLinePosition:
if err := e.setLineComment(node, n, commentGroup); err != nil {
- return errors.Wrapf(err, "failed to set line comment")
+ return err
}
case CommentFootPosition:
if err := e.setFootComment(node, n, commentGroup); err != nil {
- return errors.Wrapf(err, "failed to set foot comment")
+ return err
}
default:
return ErrUnknownCommentPositionType
@@ -166,11 +165,11 @@ func (e *Encoder) setHeadComment(node ast.Node, filtered ast.Node, comment *ast.
switch p := parent.(type) {
case *ast.MappingValueNode:
if err := p.SetComment(comment); err != nil {
- return errors.Wrapf(err, "failed to set comment")
+ return err
}
case *ast.MappingNode:
if err := p.SetComment(comment); err != nil {
- return errors.Wrapf(err, "failed to set comment")
+ return err
}
case *ast.SequenceNode:
if len(p.ValueHeadComments) == 0 {
@@ -196,11 +195,11 @@ func (e *Encoder) setLineComment(node ast.Node, filtered ast.Node, comment *ast.
// Line comment cannot be set for mapping value node.
// It should probably be set for the parent map node
if err := e.setLineCommentToParentMapNode(node, filtered, comment); err != nil {
- return errors.Wrapf(err, "failed to set line comment to parent node")
+ return err
}
default:
if err := filtered.SetComment(comment); err != nil {
- return errors.Wrapf(err, "failed to set comment")
+ return err
}
}
return nil
@@ -214,11 +213,11 @@ func (e *Encoder) setLineCommentToParentMapNode(node ast.Node, filtered ast.Node
switch p := parent.(type) {
case *ast.MappingValueNode:
if err := p.Key.SetComment(comment); err != nil {
- return errors.Wrapf(err, "failed to set comment")
+ return err
}
case *ast.MappingNode:
if err := p.SetComment(comment); err != nil {
- return errors.Wrapf(err, "failed to set comment")
+ return err
}
default:
return ErrUnsupportedLinePositionType(parent)
@@ -247,7 +246,7 @@ func (e *Encoder) setFootComment(node ast.Node, filtered ast.Node, comment *ast.
func (e *Encoder) encodeDocument(doc []byte) (ast.Node, error) {
f, err := parser.ParseBytes(doc, 0)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse yaml")
+ return nil, err
}
for _, docNode := range f.Docs {
if docNode.Body != nil {
@@ -336,11 +335,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column
if marshaler, exists := e.marshalerFromCustomMarshalerMap(v.Type()); exists {
doc, err := marshaler(iface)
if err != nil {
- return nil, errors.Wrapf(err, "failed to MarshalYAML")
+ return nil, err
}
node, err := e.encodeDocument(doc)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode document")
+ return nil, err
}
return node, nil
}
@@ -348,11 +347,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column
if marshaler, ok := iface.(BytesMarshalerContext); ok {
doc, err := marshaler.MarshalYAML(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "failed to MarshalYAML")
+ return nil, err
}
node, err := e.encodeDocument(doc)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode document")
+ return nil, err
}
return node, nil
}
@@ -360,11 +359,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column
if marshaler, ok := iface.(BytesMarshaler); ok {
doc, err := marshaler.MarshalYAML()
if err != nil {
- return nil, errors.Wrapf(err, "failed to MarshalYAML")
+ return nil, err
}
node, err := e.encodeDocument(doc)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode document")
+ return nil, err
}
return node, nil
}
@@ -372,7 +371,7 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column
if marshaler, ok := iface.(InterfaceMarshalerContext); ok {
marshalV, err := marshaler.MarshalYAML(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "failed to MarshalYAML")
+ return nil, err
}
return e.encodeValue(ctx, reflect.ValueOf(marshalV), column)
}
@@ -380,7 +379,7 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column
if marshaler, ok := iface.(InterfaceMarshaler); ok {
marshalV, err := marshaler.MarshalYAML()
if err != nil {
- return nil, errors.Wrapf(err, "failed to MarshalYAML")
+ return nil, err
}
return e.encodeValue(ctx, reflect.ValueOf(marshalV), column)
}
@@ -396,11 +395,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column
if marshaler, ok := iface.(encoding.TextMarshaler); ok {
doc, err := marshaler.MarshalText()
if err != nil {
- return nil, errors.Wrapf(err, "failed to MarshalText")
+ return nil, err
}
node, err := e.encodeDocument(doc)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode document")
+ return nil, err
}
return node, nil
}
@@ -409,21 +408,21 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column
if marshaler, ok := iface.(jsonMarshaler); ok {
jsonBytes, err := marshaler.MarshalJSON()
if err != nil {
- return nil, errors.Wrapf(err, "failed to MarshalJSON")
+ return nil, err
}
doc, err := JSONToYAML(jsonBytes)
if err != nil {
- return nil, errors.Wrapf(err, "failed to convert json to yaml")
+ return nil, err
}
node, err := e.encodeDocument(doc)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode document")
+ return nil, err
}
return node, nil
}
}
- return nil, xerrors.Errorf("does not implemented Marshaler")
+ return nil, errors.New("does not implemented Marshaler")
}
func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int) (ast.Node, error) {
@@ -433,7 +432,7 @@ func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int)
if e.canEncodeByMarshaler(v) {
node, err := e.encodeByMarshaler(ctx, v, column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode by marshaler")
+ return nil, err
}
return node, nil
}
@@ -481,7 +480,7 @@ func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int)
case reflect.Map:
return e.encodeMap(ctx, v, column), nil
default:
- return nil, xerrors.Errorf("unknown value type %s", v.Type().String())
+ return nil, fmt.Errorf("unknown value type %s", v.Type().String())
}
}
@@ -570,7 +569,7 @@ func (e *Encoder) encodeSlice(ctx context.Context, value reflect.Value) (*ast.Se
for i := 0; i < value.Len(); i++ {
node, err := e.encodeValue(ctx, value.Index(i), column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode value for slice")
+ return nil, err
}
sequence.Values = append(sequence.Values, node)
}
@@ -589,7 +588,7 @@ func (e *Encoder) encodeArray(ctx context.Context, value reflect.Value) (*ast.Se
for i := 0; i < value.Len(); i++ {
node, err := e.encodeValue(ctx, value.Index(i), column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode value for array")
+ return nil, err
}
sequence.Values = append(sequence.Values, node)
}
@@ -604,7 +603,7 @@ func (e *Encoder) encodeMapItem(ctx context.Context, item MapItem, column int) (
v := reflect.ValueOf(item.Value)
value, err := e.encodeValue(ctx, v, column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode MapItem")
+ return nil, err
}
if e.isMapNode(value) {
value.AddColumn(e.indent)
@@ -621,7 +620,7 @@ func (e *Encoder) encodeMapSlice(ctx context.Context, value MapSlice, column int
for _, item := range value {
value, err := e.encodeMapItem(ctx, item, column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode MapItem for MapSlice")
+ return nil, err
}
node.Values = append(node.Values, value)
}
@@ -730,7 +729,7 @@ func (e *Encoder) encodeAnchor(anchorName string, value ast.Node, fieldValue ref
anchorNode.Value = value
if e.anchorCallback != nil {
if err := e.anchorCallback(anchorNode, fieldValue.Interface()); err != nil {
- return nil, errors.Wrapf(err, "failed to marshal anchor")
+ return nil, err
}
if snode, ok := anchorNode.Name.(*ast.StringNode); ok {
anchorName = snode.Value
@@ -747,7 +746,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
structType := value.Type()
structFieldMap, err := structFieldMap(structType)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get struct field map")
+ return nil, err
}
hasInlineAnchorField := false
var inlineAnchorValue reflect.Value
@@ -770,7 +769,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
}
value, err := ve.encodeValue(ctx, fieldValue, column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode value")
+ return nil, err
}
if e.isMapNode(value) {
value.AddColumn(e.indent)
@@ -780,19 +779,19 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
case structField.AnchorName != "":
anchorNode, err := e.encodeAnchor(structField.AnchorName, value, fieldValue, column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode anchor")
+ return nil, err
}
value = anchorNode
case structField.IsAutoAlias:
if fieldValue.Kind() != reflect.Ptr {
- return nil, xerrors.Errorf(
+ return nil, fmt.Errorf(
"%s in struct is not pointer type. but required automatically alias detection",
structField.FieldName,
)
}
anchorName := e.anchorPtrToNameMap[fieldValue.Pointer()]
if anchorName == "" {
- return nil, xerrors.Errorf(
+ return nil, errors.New(
"cannot find anchor name from pointer address for automatically alias detection",
)
}
@@ -827,7 +826,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
if _, ok := value.(*ast.NullNode); ok {
continue
}
- return nil, xerrors.Errorf("inline value is must be map or struct type")
+ return nil, errors.New("inline value is must be map or struct type")
}
mapIter := mapNode.MapRange()
for mapIter.Next() {
@@ -846,7 +845,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
case structField.IsAutoAnchor:
anchorNode, err := e.encodeAnchor(structField.RenderName, value, fieldValue, column)
if err != nil {
- return nil, errors.Wrapf(err, "failed to encode anchor")
+ return nil, err
}
value = anchorNode
}
@@ -860,7 +859,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
anchorNode.Value = node
if e.anchorCallback != nil {
if err := e.anchorCallback(anchorNode, value.Addr().Interface()); err != nil {
- return nil, errors.Wrapf(err, "failed to marshal anchor")
+ return nil, err
}
if snode, ok := anchorNode.Name.(*ast.StringNode); ok {
anchorName = snode.Value
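
Reviewer note: EncodeContext now discards Write errors explicitly (the `_, _ =` assignments) to satisfy errcheck, and emits a "---" separator before every document after the first. A usage sketch of that multi-document behavior, assuming the package's NewEncoder/Close API:

package main

import (
	"log"
	"os"

	"github.com/goccy/go-yaml"
)

func main() {
	enc := yaml.NewEncoder(os.Stdout)
	defer enc.Close()

	// The second Encode is preceded by a "---" separator, per the
	// written/separator logic in EncodeContext above.
	if err := enc.Encode(map[string]int{"a": 1}); err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(map[string]int{"b": 2}); err != nil {
		log.Fatal(err)
	}
}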
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/error.go b/hack/tools/vendor/github.com/goccy/go-yaml/error.go
index 163dcc5588..36e4f06ede 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/error.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/error.go
@@ -1,62 +1,65 @@
package yaml
import (
+ "errors"
+ "fmt"
+
"github.com/goccy/go-yaml/ast"
- "golang.org/x/xerrors"
)
var (
- ErrInvalidQuery = xerrors.New("invalid query")
- ErrInvalidPath = xerrors.New("invalid path instance")
- ErrInvalidPathString = xerrors.New("invalid path string")
- ErrNotFoundNode = xerrors.New("node not found")
- ErrUnknownCommentPositionType = xerrors.New("unknown comment position type")
- ErrInvalidCommentMapValue = xerrors.New("invalid comment map value. it must be not nil value")
+ ErrInvalidQuery = errors.New("invalid query")
+ ErrInvalidPath = errors.New("invalid path instance")
+ ErrInvalidPathString = errors.New("invalid path string")
+ ErrNotFoundNode = errors.New("node not found")
+ ErrUnknownCommentPositionType = errors.New("unknown comment position type")
+ ErrInvalidCommentMapValue = errors.New("invalid comment map value. it must be not nil value")
+ ErrDecodeRequiredPointerType = errors.New("required pointer type value")
)
func ErrUnsupportedHeadPositionType(node ast.Node) error {
- return xerrors.Errorf("unsupported comment head position for %s", node.Type())
+ return fmt.Errorf("unsupported comment head position for %s", node.Type())
}
func ErrUnsupportedLinePositionType(node ast.Node) error {
- return xerrors.Errorf("unsupported comment line position for %s", node.Type())
+ return fmt.Errorf("unsupported comment line position for %s", node.Type())
}
func ErrUnsupportedFootPositionType(node ast.Node) error {
- return xerrors.Errorf("unsupported comment foot position for %s", node.Type())
+ return fmt.Errorf("unsupported comment foot position for %s", node.Type())
}
// IsInvalidQueryError whether err is ErrInvalidQuery or not.
func IsInvalidQueryError(err error) bool {
- return xerrors.Is(err, ErrInvalidQuery)
+ return errors.Is(err, ErrInvalidQuery)
}
// IsInvalidPathError whether err is ErrInvalidPath or not.
func IsInvalidPathError(err error) bool {
- return xerrors.Is(err, ErrInvalidPath)
+ return errors.Is(err, ErrInvalidPath)
}
// IsInvalidPathStringError whether err is ErrInvalidPathString or not.
func IsInvalidPathStringError(err error) bool {
- return xerrors.Is(err, ErrInvalidPathString)
+ return errors.Is(err, ErrInvalidPathString)
}
// IsNotFoundNodeError whether err is ErrNotFoundNode or not.
func IsNotFoundNodeError(err error) bool {
- return xerrors.Is(err, ErrNotFoundNode)
+ return errors.Is(err, ErrNotFoundNode)
}
// IsInvalidTokenTypeError whether err is ast.ErrInvalidTokenType or not.
func IsInvalidTokenTypeError(err error) bool {
- return xerrors.Is(err, ast.ErrInvalidTokenType)
+ return errors.Is(err, ast.ErrInvalidTokenType)
}
// IsInvalidAnchorNameError whether err is ast.ErrInvalidAnchorName or not.
func IsInvalidAnchorNameError(err error) bool {
- return xerrors.Is(err, ast.ErrInvalidAnchorName)
+ return errors.Is(err, ast.ErrInvalidAnchorName)
}
// IsInvalidAliasNameError whether err is ast.ErrInvalidAliasName or not.
func IsInvalidAliasNameError(err error) bool {
- return xerrors.Is(err, ast.ErrInvalidAliasName)
+ return errors.Is(err, ast.ErrInvalidAliasName)
}
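
Reviewer note: after this change the exported predicates are thin wrappers over errors.Is against stdlib sentinels, so callers can use either spelling. A short sketch using the names exported in this file:

package main

import (
	"errors"
	"fmt"

	"github.com/goccy/go-yaml"
)

func main() {
	// A sentinel wrapped with %w still matches via the predicate
	// or via errors.Is directly.
	err := fmt.Errorf("lookup: %w", yaml.ErrNotFoundNode)
	fmt.Println(yaml.IsNotFoundNodeError(err))        // true
	fmt.Println(errors.Is(err, yaml.ErrNotFoundNode)) // true
}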
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go b/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go
index 7f1ea9af7e..5d3ca4b3dd 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go
@@ -2,12 +2,12 @@ package errors
import (
"bytes"
+ "errors"
"fmt"
"reflect"
"github.com/goccy/go-yaml/printer"
"github.com/goccy/go-yaml/token"
- "golang.org/x/xerrors"
)
const (
@@ -15,167 +15,79 @@ const (
defaultIncludeSource = true
)
-var (
- ErrDecodeRequiredPointerType = xerrors.New("required pointer type value")
-)
-
-// Wrapf wrap error for stack trace
-func Wrapf(err error, msg string, args ...interface{}) error {
- return &wrapError{
- baseError: &baseError{},
- err: xerrors.Errorf(msg, args...),
- nextErr: err,
- frame: xerrors.Caller(1),
- }
-}
-
// ErrSyntax create syntax error instance with message and token
func ErrSyntax(msg string, tk *token.Token) *syntaxError {
return &syntaxError{
- baseError: &baseError{},
- msg: msg,
- token: tk,
- frame: xerrors.Caller(1),
+ msg: msg,
+ token: tk,
}
}
-type baseError struct {
- state fmt.State
- verb rune
+// ErrOverflow creates an overflow error instance with message and a token.
+func ErrOverflow(dstType reflect.Type, num string, tk *token.Token) *overflowError {
+ return &overflowError{dstType: dstType, srcNum: num, token: tk}
}
-func (e *baseError) Error() string {
- return ""
-}
-
-func (e *baseError) chainStateAndVerb(err error) {
- wrapErr, ok := err.(*wrapError)
- if ok {
- wrapErr.state = e.state
- wrapErr.verb = e.verb
- }
- syntaxErr, ok := err.(*syntaxError)
- if ok {
- syntaxErr.state = e.state
- syntaxErr.verb = e.verb
- }
-}
-
-type wrapError struct {
- *baseError
- err error
- nextErr error
- frame xerrors.Frame
+type Printer interface {
+ // Print appends args to the message output.
+ Print(args ...any)
}
type FormatErrorPrinter struct {
- xerrors.Printer
+ Printer
Colored bool
InclSource bool
}
-func (e *wrapError) As(target interface{}) bool {
- err := e.nextErr
- for {
- if wrapErr, ok := err.(*wrapError); ok {
- err = wrapErr.nextErr
- continue
- }
- break
- }
- return xerrors.As(err, target)
-}
-
-func (e *wrapError) Unwrap() error {
- return e.nextErr
-}
-
-func (e *wrapError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error {
- return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource})
-}
-
-func (e *wrapError) FormatError(p xerrors.Printer) error {
- if _, ok := p.(*FormatErrorPrinter); !ok {
- p = &FormatErrorPrinter{
- Printer: p,
- Colored: defaultColorize,
- InclSource: defaultIncludeSource,
- }
- }
- if e.verb == 'v' && e.state.Flag('+') {
- // print stack trace for debugging
- p.Print(e.err, "\n")
- e.frame.Format(p)
- e.chainStateAndVerb(e.nextErr)
- return e.nextErr
- }
- err := e.nextErr
- for {
- if wrapErr, ok := err.(*wrapError); ok {
- err = wrapErr.nextErr
- continue
- }
- break
- }
- e.chainStateAndVerb(err)
- if fmtErr, ok := err.(xerrors.Formatter); ok {
- fmtErr.FormatError(p)
- } else {
- p.Print(err)
- }
- return nil
-}
+var (
+ As = errors.As
+ Is = errors.Is
+ New = errors.New
+)
-type wrapState struct {
- org fmt.State
+type overflowError struct {
+ dstType reflect.Type
+ srcNum string
+ token *token.Token
}
-func (s *wrapState) Write(b []byte) (n int, err error) {
- return s.org.Write(b)
+func (e *overflowError) Error() string {
+ return fmt.Sprintf("cannot unmarshal %s into Go value of type %s ( overflow )", e.srcNum, e.dstType)
}
-func (s *wrapState) Width() (wid int, ok bool) {
- return s.org.Width()
+func (e *overflowError) PrettyPrint(p Printer, colored, inclSource bool) error {
+ return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource})
}
-func (s *wrapState) Precision() (prec int, ok bool) {
- return s.org.Precision()
-}
+func (e *overflowError) FormatError(p Printer) error {
+ var pp printer.Printer
-func (s *wrapState) Flag(c int) bool {
- // set true to 'printDetail' forced because when p.Detail() is false, xerrors.Printer no output any text
- if c == '#' {
- // ignore '#' keyword because xerrors.FormatError doesn't set true to printDetail.
- // ( see https://github.com/golang/xerrors/blob/master/adaptor.go#L39-L43 )
- return false
+ var colored, inclSource bool
+ if fep, ok := p.(*FormatErrorPrinter); ok {
+ colored = fep.Colored
+ inclSource = fep.InclSource
}
- return true
-}
-func (e *wrapError) Format(state fmt.State, verb rune) {
- e.state = state
- e.verb = verb
- xerrors.FormatError(e, &wrapState{org: state}, verb)
-}
+ pos := fmt.Sprintf("[%d:%d] ", e.token.Position.Line, e.token.Position.Column)
+ msg := pp.PrintErrorMessage(fmt.Sprintf("%s%s", pos, e.Error()), colored)
+ if inclSource {
+ msg += "\n" + pp.PrintErrorToken(e.token, colored)
+ }
+ p.Print(msg)
-func (e *wrapError) Error() string {
- var buf bytes.Buffer
- e.PrettyPrint(&Sink{&buf}, defaultColorize, defaultIncludeSource)
- return buf.String()
+ return nil
}
type syntaxError struct {
- *baseError
msg string
token *token.Token
- frame xerrors.Frame
}
-func (e *syntaxError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error {
+func (e *syntaxError) PrettyPrint(p Printer, colored, inclSource bool) error {
return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource})
}
-func (e *syntaxError) FormatError(p xerrors.Printer) error {
+func (e *syntaxError) FormatError(p Printer) error {
var pp printer.Printer
var colored, inclSource bool
@@ -190,18 +102,13 @@ func (e *syntaxError) FormatError(p xerrors.Printer) error {
msg += "\n" + pp.PrintErrorToken(e.token, colored)
}
p.Print(msg)
-
- if e.verb == 'v' && e.state.Flag('+') {
- // %+v
- // print stack trace for debugging
- e.frame.Format(p)
- }
return nil
}
type PrettyPrinter interface {
- PrettyPrint(xerrors.Printer, bool, bool) error
+ PrettyPrint(Printer, bool, bool) error
}
+
type Sink struct{ *bytes.Buffer }
func (es *Sink) Print(args ...interface{}) {
@@ -236,11 +143,11 @@ func (e *TypeError) Error() string {
return fmt.Sprintf("cannot unmarshal %s into Go value of type %s", e.SrcType, e.DstType)
}
-func (e *TypeError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error {
+func (e *TypeError) PrettyPrint(p Printer, colored, inclSource bool) error {
return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource})
}
-func (e *TypeError) FormatError(p xerrors.Printer) error {
+func (e *TypeError) FormatError(p Printer) error {
var pp printer.Printer
var colored, inclSource bool
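
With xerrors.Printer gone, any single-method Print implementation satisfies the package's new Printer interface. A sketch of a buffer-backed printer in the spirit of the package's own Sink type (all types here are local stand-ins):

```go
package main

import (
	"bytes"
	"fmt"
)

// Printer mirrors the one-method interface the vendored errors package
// now defines in place of xerrors.Printer.
type Printer interface {
	Print(args ...any)
}

// bufPrinter collects everything printed into a buffer, much like the
// package's Sink type does.
type bufPrinter struct{ buf bytes.Buffer }

func (p *bufPrinter) Print(args ...any) {
	p.buf.WriteString(fmt.Sprint(args...))
}

func main() {
	bp := &bufPrinter{}
	var p Printer = bp
	p.Print("[2:5] ", "cannot unmarshal string into Go value of type int")
	fmt.Println(bp.buf.String())
}
```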
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go b/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go
index 42cc4f8fac..c4752f9f4b 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go
@@ -110,13 +110,6 @@ func (c *context) nextToken() *token.Token {
return c.tokens[c.idx+1]
}
-func (c *context) afterNextToken() *token.Token {
- if c.idx+2 >= c.size {
- return nil
- }
- return c.tokens[c.idx+2]
-}
-
func (c *context) nextNotCommentToken() *token.Token {
for i := c.idx + 1; i < c.size; i++ {
tk := c.tokens[i]
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go b/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go
index 13ada50f9d..82e066f681 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go
@@ -2,14 +2,13 @@ package parser
import (
"fmt"
- "io/ioutil"
+ "os"
"strings"
"github.com/goccy/go-yaml/ast"
"github.com/goccy/go-yaml/internal/errors"
"github.com/goccy/go-yaml/lexer"
"github.com/goccy/go-yaml/token"
- "golang.org/x/xerrors"
)
type parser struct{}
@@ -31,7 +30,7 @@ func (p *parser) parseMapping(ctx *context) (*ast.MappingNode, error) {
value, err := p.parseMappingValue(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse mapping value in mapping node")
+ return nil, err
}
mvnode, ok := value.(*ast.MappingValueNode)
if !ok {
@@ -59,7 +58,7 @@ func (p *parser) parseSequence(ctx *context) (*ast.SequenceNode, error) {
value, err := p.parseToken(ctx.withIndex(uint(len(node.Values))), tk)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse sequence value in flow sequence node")
+ return nil, err
}
node.Values = append(node.Values, value)
ctx.progress(1)
@@ -100,7 +99,7 @@ func (p *parser) parseTag(ctx *context) (*ast.TagNode, error) {
value, err = p.parseToken(ctx, ctx.currentToken())
}
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse tag value")
+ return nil, err
}
node.Value = value
return node, nil
@@ -141,7 +140,7 @@ func (p *parser) createNullToken(base *token.Token) *token.Token {
func (p *parser) parseMapValue(ctx *context, key ast.MapKeyNode, colonToken *token.Token) (ast.Node, error) {
node, err := p.createMapValueNode(ctx, key, colonToken)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create map value node")
+ return nil, err
}
if node != nil && node.GetPath() == "" {
node.SetPath(ctx.path)
@@ -175,7 +174,7 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken
nullNode := ast.Null(nullToken)
if comment != nil {
- nullNode.SetComment(comment)
+ _ = nullNode.SetComment(comment)
} else {
// If there is a comment, it is already bound to the key node,
// so remove the comment from the key to bind it to the null value.
@@ -184,7 +183,7 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken
if err := key.SetComment(nil); err != nil {
return nil, err
}
- nullNode.SetComment(keyComment)
+ _ = nullNode.SetComment(keyComment)
}
}
return nullNode, nil
@@ -199,17 +198,17 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken
ctx.insertToken(ctx.idx, nullToken)
nullNode := ast.Null(nullToken)
if comment != nil {
- nullNode.SetComment(comment)
+ _ = nullNode.SetComment(comment)
}
return nullNode, nil
}
value, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse mapping 'value' node")
+ return nil, err
}
if comment != nil {
- value.SetComment(comment)
+ _ = value.SetComment(comment)
}
return value, nil
}
@@ -233,12 +232,12 @@ func (p *parser) validateMapValue(ctx *context, key, value ast.Node) error {
func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) {
key, err := p.parseMapKey(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse map key")
+ return nil, err
}
keyText := key.GetToken().Value
key.SetPath(ctx.withChild(keyText).path)
if err := p.validateMapKey(key.GetToken()); err != nil {
- return nil, errors.Wrapf(err, "validate mapping key error")
+ return nil, err
}
ctx.progress(1) // progress to mapping value token
tk := ctx.currentToken() // get mapping value token
@@ -247,7 +246,7 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) {
}
ctx.progress(1) // progress to value token
if err := p.setSameLineCommentIfExists(ctx.withChild(keyText), key); err != nil {
- return nil, errors.Wrapf(err, "failed to set same line comment to node")
+ return nil, err
}
if key.GetComment() != nil {
// if current token is comment, GetComment() is not nil.
@@ -257,10 +256,10 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) {
value, err := p.parseMapValue(ctx.withChild(keyText), key, tk)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse map value")
+ return nil, err
}
if err := p.validateMapValue(ctx, key, value); err != nil {
- return nil, errors.Wrapf(err, "failed to validate map value")
+ return nil, err
}
mvnode := ast.MappingValue(tk, key, value)
@@ -275,16 +274,16 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) {
ctx.progressIgnoreComment(1)
value, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse mapping node")
+ return nil, err
}
switch value.Type() {
case ast.MappingType:
- c := value.(*ast.MappingNode)
+ c, _ := value.(*ast.MappingNode)
comment := c.GetComment()
for idx, v := range c.Values {
if idx == 0 && comment != nil {
if err := v.SetComment(comment); err != nil {
- return nil, errors.Wrapf(err, "failed to set comment token to node")
+ return nil, err
}
}
node.Values = append(node.Values, v)
@@ -292,7 +291,7 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) {
case ast.MappingValueType:
node.Values = append(node.Values, value.(*ast.MappingValueNode))
default:
- return nil, xerrors.Errorf("failed to parse mapping value node node is %s", value.Type())
+ return nil, fmt.Errorf("failed to parse mapping value node node is %s", value.Type())
}
ntk = ctx.nextNotCommentToken()
antk = ctx.afterNextNotCommentToken()
@@ -340,7 +339,7 @@ func (p *parser) parseSequenceEntry(ctx *context) (*ast.SequenceNode, error) {
}
value, err := p.parseToken(ctx.withIndex(uint(len(sequenceNode.Values))), ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse sequence")
+ return nil, err
}
if comment != nil {
comment.SetPath(ctx.withIndex(uint(len(sequenceNode.Values))).path)
@@ -382,7 +381,7 @@ func (p *parser) parseAnchor(ctx *context) (*ast.AnchorNode, error) {
ctx.progress(1) // skip anchor token
name, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parser anchor name node")
+ return nil, err
}
anchor.Name = name
ntk = ctx.nextToken()
@@ -392,7 +391,7 @@ func (p *parser) parseAnchor(ctx *context) (*ast.AnchorNode, error) {
ctx.progress(1)
value, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parser anchor name node")
+ return nil, err
}
anchor.Value = value
return anchor, nil
@@ -409,7 +408,7 @@ func (p *parser) parseAlias(ctx *context) (*ast.AliasNode, error) {
ctx.progress(1) // skip alias token
name, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parser alias name node")
+ return nil, err
}
alias.Value = name
return alias, nil
@@ -448,7 +447,7 @@ func (p *parser) parseScalarValueWithComment(ctx *context, tk *token.Token) (ast
if p.isSameLineComment(ctx.nextToken(), node) {
ctx.progress(1)
if err := p.setSameLineCommentIfExists(ctx, node); err != nil {
- return nil, errors.Wrapf(err, "failed to set same line comment to node")
+ return nil, err
}
}
return node, nil
@@ -483,7 +482,7 @@ func (p *parser) parseDirective(ctx *context) (*ast.DirectiveNode, error) {
ctx.progress(1) // skip directive token
value, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse directive value")
+ return nil, err
}
node.Value = value
ctx.progress(1)
@@ -509,13 +508,13 @@ func (p *parser) parseLiteral(ctx *context) (*ast.LiteralNode, error) {
comment = p.parseCommentOnly(ctx)
comment.SetPath(ctx.path)
if err := node.SetComment(comment); err != nil {
- return nil, errors.Wrapf(err, "failed to set comment to literal")
+ return nil, err
}
tk = ctx.currentToken()
}
value, err := p.parseToken(ctx, tk)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse literal/folded value")
+ return nil, err
}
snode, ok := value.(*ast.StringNode)
if !ok {
@@ -543,7 +542,7 @@ func (p *parser) setSameLineCommentIfExists(ctx *context, node ast.Node) error {
comment := ast.CommentGroup([]*token.Token{tk})
comment.SetPath(ctx.path)
if err := node.SetComment(comment); err != nil {
- return errors.Wrapf(err, "failed to set comment token to ast.Node")
+ return err
}
return nil
}
@@ -553,7 +552,7 @@ func (p *parser) parseDocument(ctx *context) (*ast.DocumentNode, error) {
ctx.progress(1) // skip document header token
body, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse document body")
+ return nil, err
}
node := ast.Document(startTk, body)
if ntk := ctx.nextToken(); ntk != nil && ntk.Type == token.DocumentEndType {
@@ -603,14 +602,14 @@ func (p *parser) parseComment(ctx *context) (ast.Node, error) {
group := p.parseCommentOnly(ctx)
node, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse node after comment")
+ return nil, err
}
if node == nil {
return group, nil
}
group.SetPath(node.GetPath())
if err := node.SetComment(group); err != nil {
- return nil, errors.Wrapf(err, "failed to set comment token to node")
+ return nil, err
}
return node, nil
}
@@ -622,7 +621,7 @@ func (p *parser) parseMappingKey(ctx *context) (*ast.MappingKeyNode, error) {
ctx.progress(1) // skip mapping key token
value, err := p.parseToken(ctx.withChild(keyTk.Value), ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse map key")
+ return nil, err
}
node.Value = value
return node, nil
@@ -631,7 +630,7 @@ func (p *parser) parseMappingKey(ctx *context) (*ast.MappingKeyNode, error) {
func (p *parser) parseToken(ctx *context, tk *token.Token) (ast.Node, error) {
node, err := p.createNodeFromToken(ctx, tk)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create node from token")
+ return nil, err
}
if node != nil && node.GetPath() == "" {
node.SetPath(ctx.path)
@@ -649,7 +648,7 @@ func (p *parser) createNodeFromToken(ctx *context, tk *token.Token) (ast.Node, e
}
node, err := p.parseScalarValueWithComment(ctx, tk)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse scalar value")
+ return nil, err
}
if node != nil {
return node, nil
@@ -687,7 +686,7 @@ func (p *parser) parse(tokens token.Tokens, mode Mode) (*ast.File, error) {
for ctx.next() {
node, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse")
+ return nil, err
}
ctx.progressIgnoreComment(1)
if node == nil {
@@ -713,7 +712,7 @@ func ParseBytes(bytes []byte, mode Mode) (*ast.File, error) {
tokens := lexer.Tokenize(string(bytes))
f, err := Parse(tokens, mode)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse")
+ return nil, err
}
return f, nil
}
@@ -723,20 +722,20 @@ func Parse(tokens token.Tokens, mode Mode) (*ast.File, error) {
var p parser
f, err := p.parse(tokens, mode)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse")
+ return nil, err
}
return f, nil
}
// Parse parse from filename, and returns ast.File
func ParseFile(filename string, mode Mode) (*ast.File, error) {
- file, err := ioutil.ReadFile(filename)
+ file, err := os.ReadFile(filename)
if err != nil {
- return nil, errors.Wrapf(err, "failed to read file: %s", filename)
+ return nil, err
}
f, err := ParseBytes(file, mode)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse")
+ return nil, err
}
f.Name = filename
return f, nil
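
Dropping the errors.Wrapf calls means parser errors now propagate unmodified, so callers can match the underlying OS error directly. A sketch under that assumption (the file name is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/goccy/go-yaml/parser"
)

func main() {
	// ParseFile now returns the os.ReadFile error as-is, so the
	// *fs.PathError underneath is visible to errors.Is at the call site.
	_, err := parser.ParseFile("missing.yaml", 0)
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("file does not exist:", err)
	}
}
```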
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/path.go b/hack/tools/vendor/github.com/goccy/go-yaml/path.go
index 72554bd8be..fcb56815e7 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/path.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/path.go
@@ -8,7 +8,6 @@ import (
"strings"
"github.com/goccy/go-yaml/ast"
- "github.com/goccy/go-yaml/internal/errors"
"github.com/goccy/go-yaml/parser"
"github.com/goccy/go-yaml/printer"
)
@@ -39,7 +38,7 @@ func PathString(s string) (*Path, error) {
case '.':
b, buf, c, err := parsePathDot(builder, buf, cursor)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse path of dot")
+ return nil, err
}
length = len(buf)
builder = b
@@ -47,13 +46,13 @@ func PathString(s string) (*Path, error) {
case '[':
b, buf, c, err := parsePathIndex(builder, buf, cursor)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse path of index")
+ return nil, err
}
length = len(buf)
builder = b
cursor = c
default:
- return nil, errors.Wrapf(ErrInvalidPathString, "invalid path at %d", cursor)
+ return nil, fmt.Errorf("invalid path at %d: %w", cursor, ErrInvalidPathString)
}
}
return builder.Build(), nil
@@ -67,18 +66,18 @@ func parsePathRecursive(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, [
c := buf[cursor]
switch c {
case '$':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '..' character")
+ return nil, nil, 0, fmt.Errorf("specified '$' after '..' character: %w", ErrInvalidPathString)
case '*':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '..' character")
+ return nil, nil, 0, fmt.Errorf("specified '*' after '..' character: %w", ErrInvalidPathString)
case '.', '[':
goto end
case ']':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '..' character")
+ return nil, nil, 0, fmt.Errorf("specified ']' after '..' character: %w", ErrInvalidPathString)
}
}
end:
if start == cursor {
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "not found recursive selector")
+ return nil, nil, 0, fmt.Errorf("not found recursive selector: %w", ErrInvalidPathString)
}
return b.Recursive(string(buf[start:cursor])), buf, cursor, nil
}
@@ -88,7 +87,7 @@ func parsePathDot(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune,
if cursor+1 < length && buf[cursor+1] == '.' {
b, buf, c, err := parsePathRecursive(b, buf, cursor)
if err != nil {
- return nil, nil, 0, errors.Wrapf(err, "failed to parse path of recursive")
+ return nil, nil, 0, err
}
return b, buf, c, nil
}
@@ -103,18 +102,18 @@ func parsePathDot(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune,
c := buf[cursor]
switch c {
case '$':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '.' character")
+ return nil, nil, 0, fmt.Errorf("specified '$' after '.' character: %w", ErrInvalidPathString)
case '*':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '.' character")
+ return nil, nil, 0, fmt.Errorf("specified '*' after '.' character: %w", ErrInvalidPathString)
case '.', '[':
goto end
case ']':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '.' character")
+ return nil, nil, 0, fmt.Errorf("specified ']' after '.' character: %w", ErrInvalidPathString)
}
}
end:
if start == cursor {
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "cloud not find by empty key")
+ return nil, nil, 0, fmt.Errorf("cloud not find by empty key: %w", ErrInvalidPathString)
}
return b.child(string(buf[start:cursor])), buf, cursor, nil
}
@@ -136,21 +135,21 @@ func parseQuotedKey(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run
}
end:
if !foundEndDelim {
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "could not find end delimiter for key")
+ return nil, nil, 0, fmt.Errorf("could not find end delimiter for key: %w", ErrInvalidPathString)
}
if start == cursor {
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "could not find by empty key")
+ return nil, nil, 0, fmt.Errorf("could not find by empty key: %w", ErrInvalidPathString)
}
selector := buf[start:cursor]
cursor++
if cursor < length {
switch buf[cursor] {
case '$':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '.' character")
+ return nil, nil, 0, fmt.Errorf("specified '$' after '.' character: %w", ErrInvalidPathString)
case '*':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '.' character")
+ return nil, nil, 0, fmt.Errorf("specified '*' after '.' character: %w", ErrInvalidPathString)
case ']':
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '.' character")
+ return nil, nil, 0, fmt.Errorf("specified ']' after '.' character: %w", ErrInvalidPathString)
}
}
return b.child(string(selector)), buf, cursor, nil
@@ -160,7 +159,7 @@ func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run
length := len(buf)
cursor++ // skip '[' character
if length <= cursor {
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "unexpected end of YAML Path")
+ return nil, nil, 0, fmt.Errorf("unexpected end of YAML Path: %w", ErrInvalidPathString)
}
c := buf[cursor]
switch c {
@@ -176,7 +175,7 @@ func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run
break
}
if buf[cursor] != ']' {
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", string(buf[cursor]), cursor)
+ return nil, nil, 0, fmt.Errorf("invalid character %s at %d: %w", string(buf[cursor]), cursor, ErrInvalidPathString)
}
numOrAll := string(buf[start:cursor])
if numOrAll == "*" {
@@ -184,11 +183,11 @@ func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run
}
num, err := strconv.ParseInt(numOrAll, 10, 64)
if err != nil {
- return nil, nil, 0, errors.Wrapf(err, "failed to parse number")
+ return nil, nil, 0, err
}
return b.Index(uint(num)), buf, cursor + 1, nil
}
- return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", c, cursor)
+ return nil, nil, 0, fmt.Errorf("invalid character %q at %d: %w", c, cursor, ErrInvalidPathString)
}
// Path represent YAMLPath ( like a JSONPath ).
@@ -205,10 +204,10 @@ func (p *Path) String() string {
func (p *Path) Read(r io.Reader, v interface{}) error {
node, err := p.ReadNode(r)
if err != nil {
- return errors.Wrapf(err, "failed to read node")
+ return err
}
if err := Unmarshal([]byte(node.String()), v); err != nil {
- return errors.Wrapf(err, "failed to unmarshal")
+ return err
}
return nil
}
@@ -220,15 +219,15 @@ func (p *Path) ReadNode(r io.Reader) (ast.Node, error) {
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
- return nil, errors.Wrapf(err, "failed to copy from reader")
+ return nil, err
}
f, err := parser.ParseBytes(buf.Bytes(), 0)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse yaml")
+ return nil, err
}
node, err := p.FilterFile(f)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter from ast.File")
+ return nil, err
}
return node, nil
}
@@ -237,10 +236,10 @@ func (p *Path) ReadNode(r io.Reader) (ast.Node, error) {
func (p *Path) Filter(target, v interface{}) error {
b, err := Marshal(target)
if err != nil {
- return errors.Wrapf(err, "failed to marshal target value")
+ return err
}
if err := p.Read(bytes.NewBuffer(b), v); err != nil {
- return errors.Wrapf(err, "failed to read")
+ return err
}
return nil
}
@@ -250,20 +249,20 @@ func (p *Path) FilterFile(f *ast.File) (ast.Node, error) {
for _, doc := range f.Docs {
node, err := p.FilterNode(doc.Body)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter node by path ( %s )", p.node)
+ return nil, err
}
if node != nil {
return node, nil
}
}
- return nil, errors.Wrapf(ErrNotFoundNode, "failed to find path ( %s )", p.node)
+ return nil, fmt.Errorf("failed to find path ( %s ): %w", p.node, ErrNotFoundNode)
}
// FilterNode filter from node by YAMLPath.
func (p *Path) FilterNode(node ast.Node) (ast.Node, error) {
n, err := p.node.filter(node)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter node by path ( %s )", p.node)
+ return nil, err
}
return n, nil
}
@@ -272,14 +271,14 @@ func (p *Path) FilterNode(node ast.Node) (ast.Node, error) {
func (p *Path) MergeFromReader(dst *ast.File, src io.Reader) error {
var buf bytes.Buffer
if _, err := io.Copy(&buf, src); err != nil {
- return errors.Wrapf(err, "failed to copy from reader")
+ return err
}
file, err := parser.ParseBytes(buf.Bytes(), 0)
if err != nil {
- return errors.Wrapf(err, "failed to parse")
+ return err
}
if err := p.MergeFromFile(dst, file); err != nil {
- return errors.Wrapf(err, "failed to merge file")
+ return err
}
return nil
}
@@ -288,11 +287,11 @@ func (p *Path) MergeFromReader(dst *ast.File, src io.Reader) error {
func (p *Path) MergeFromFile(dst *ast.File, src *ast.File) error {
base, err := p.FilterFile(dst)
if err != nil {
- return errors.Wrapf(err, "failed to filter file")
+ return err
}
for _, doc := range src.Docs {
if err := ast.Merge(base, doc); err != nil {
- return errors.Wrapf(err, "failed to merge")
+ return err
}
}
return nil
@@ -302,10 +301,10 @@ func (p *Path) MergeFromFile(dst *ast.File, src *ast.File) error {
func (p *Path) MergeFromNode(dst *ast.File, src ast.Node) error {
base, err := p.FilterFile(dst)
if err != nil {
- return errors.Wrapf(err, "failed to filter file")
+ return err
}
if err := ast.Merge(base, src); err != nil {
- return errors.Wrapf(err, "failed to merge")
+ return err
}
return nil
}
@@ -314,14 +313,14 @@ func (p *Path) MergeFromNode(dst *ast.File, src ast.Node) error {
func (p *Path) ReplaceWithReader(dst *ast.File, src io.Reader) error {
var buf bytes.Buffer
if _, err := io.Copy(&buf, src); err != nil {
- return errors.Wrapf(err, "failed to copy from reader")
+ return err
}
file, err := parser.ParseBytes(buf.Bytes(), 0)
if err != nil {
- return errors.Wrapf(err, "failed to parse")
+ return err
}
if err := p.ReplaceWithFile(dst, file); err != nil {
- return errors.Wrapf(err, "failed to replace file")
+ return err
}
return nil
}
@@ -330,7 +329,7 @@ func (p *Path) ReplaceWithReader(dst *ast.File, src io.Reader) error {
func (p *Path) ReplaceWithFile(dst *ast.File, src *ast.File) error {
for _, doc := range src.Docs {
if err := p.ReplaceWithNode(dst, doc); err != nil {
- return errors.Wrapf(err, "failed to replace file by path ( %s )", p.node)
+ return err
}
}
return nil
@@ -343,7 +342,7 @@ func (p *Path) ReplaceWithNode(dst *ast.File, node ast.Node) error {
node = node.(*ast.DocumentNode).Body
}
if err := p.node.replace(doc.Body, node); err != nil {
- return errors.Wrapf(err, "failed to replace node by path ( %s )", p.node)
+ return err
}
}
return nil
@@ -468,11 +467,11 @@ func (n *rootNode) String() string {
func (n *rootNode) filter(node ast.Node) (ast.Node, error) {
if n.child == nil {
- return nil, nil
+ return node, nil
}
filtered, err := n.child.filter(node)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
return filtered, nil
}
@@ -482,7 +481,7 @@ func (n *rootNode) replace(node ast.Node, target ast.Node) error {
return nil
}
if err := n.child.replace(node, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
return nil
}
@@ -514,7 +513,7 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) {
var err error
key, err = strconv.Unquote(key)
if err != nil {
- return nil, errors.Wrapf(err, "failed to unquote")
+ return nil, err
}
case '\'':
if len(key) > 1 && key[len(key)-1] == '\'' {
@@ -528,13 +527,13 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) {
}
filtered, err := n.child.filter(value.Value)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
return filtered, nil
}
}
case ast.MappingValueType:
- value := node.(*ast.MappingValueNode)
+ value, _ := node.(*ast.MappingValueNode)
key := value.Key.GetToken().Value
if key == selector {
if n.child == nil {
@@ -542,12 +541,12 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) {
}
filtered, err := n.child.filter(value.Value)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
return filtered, nil
}
default:
- return nil, errors.Wrapf(ErrInvalidQuery, "expected node type is map or map value. but got %s", node.Type())
+ return nil, fmt.Errorf("expected node type is map or map value. but got %s: %w", node.Type(), ErrInvalidQuery)
}
return nil, nil
}
@@ -559,11 +558,11 @@ func (n *selectorNode) replaceMapValue(value *ast.MappingValueNode, target ast.N
}
if n.child == nil {
if err := value.Replace(target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
} else {
if err := n.child.replace(value.Value, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
}
return nil
@@ -574,16 +573,16 @@ func (n *selectorNode) replace(node ast.Node, target ast.Node) error {
case ast.MappingType:
for _, value := range node.(*ast.MappingNode).Values {
if err := n.replaceMapValue(value, target); err != nil {
- return errors.Wrapf(err, "failed to replace map value")
+ return err
}
}
case ast.MappingValueType:
- value := node.(*ast.MappingValueNode)
+ value, _ := node.(*ast.MappingValueNode)
if err := n.replaceMapValue(value, target); err != nil {
- return errors.Wrapf(err, "failed to replace map value")
+ return err
}
default:
- return errors.Wrapf(ErrInvalidQuery, "expected node type is map or map value. but got %s", node.Type())
+ return fmt.Errorf("expected node type is map or map value. but got %s: %w", node.Type(), ErrInvalidQuery)
}
return nil
}
@@ -612,11 +611,11 @@ func newIndexNode(selector uint) *indexNode {
func (n *indexNode) filter(node ast.Node) (ast.Node, error) {
if node.Type() != ast.SequenceType {
- return nil, errors.Wrapf(ErrInvalidQuery, "expected sequence type node. but got %s", node.Type())
+ return nil, fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery)
}
- sequence := node.(*ast.SequenceNode)
+ sequence, _ := node.(*ast.SequenceNode)
if n.selector >= uint(len(sequence.Values)) {
- return nil, errors.Wrapf(ErrInvalidQuery, "expected index is %d. but got sequences has %d items", n.selector, sequence.Values)
+ return nil, fmt.Errorf("expected index is %d. but got sequences has %d items: %w", n.selector, sequence.Values, ErrInvalidQuery)
}
value := sequence.Values[n.selector]
if n.child == nil {
@@ -624,27 +623,27 @@ func (n *indexNode) filter(node ast.Node) (ast.Node, error) {
}
filtered, err := n.child.filter(value)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
return filtered, nil
}
func (n *indexNode) replace(node ast.Node, target ast.Node) error {
if node.Type() != ast.SequenceType {
- return errors.Wrapf(ErrInvalidQuery, "expected sequence type node. but got %s", node.Type())
+ return fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery)
}
- sequence := node.(*ast.SequenceNode)
+ sequence, _ := node.(*ast.SequenceNode)
if n.selector >= uint(len(sequence.Values)) {
- return errors.Wrapf(ErrInvalidQuery, "expected index is %d. but got sequences has %d items", n.selector, sequence.Values)
+ return fmt.Errorf("expected index is %d. but got sequences has %d items: %w", n.selector, sequence.Values, ErrInvalidQuery)
}
if n.child == nil {
if err := sequence.Replace(int(n.selector), target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
return nil
}
if err := n.child.replace(sequence.Values[n.selector], target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
return nil
}
@@ -677,9 +676,9 @@ func (n *indexAllNode) String() string {
func (n *indexAllNode) filter(node ast.Node) (ast.Node, error) {
if node.Type() != ast.SequenceType {
- return nil, errors.Wrapf(ErrInvalidQuery, "expected sequence type node. but got %s", node.Type())
+ return nil, fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery)
}
- sequence := node.(*ast.SequenceNode)
+ sequence, _ := node.(*ast.SequenceNode)
if n.child == nil {
return sequence, nil
}
@@ -688,7 +687,7 @@ func (n *indexAllNode) filter(node ast.Node) (ast.Node, error) {
for _, value := range sequence.Values {
filtered, err := n.child.filter(value)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
out.Values = append(out.Values, filtered)
}
@@ -697,20 +696,20 @@ func (n *indexAllNode) filter(node ast.Node) (ast.Node, error) {
func (n *indexAllNode) replace(node ast.Node, target ast.Node) error {
if node.Type() != ast.SequenceType {
- return errors.Wrapf(ErrInvalidQuery, "expected sequence type node. but got %s", node.Type())
+ return fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery)
}
- sequence := node.(*ast.SequenceNode)
+ sequence, _ := node.(*ast.SequenceNode)
if n.child == nil {
for idx := range sequence.Values {
if err := sequence.Replace(idx, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
}
return nil
}
for _, value := range sequence.Values {
if err := n.child.replace(value, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
}
return nil
@@ -743,7 +742,7 @@ func (n *recursiveNode) filterNode(node ast.Node) (*ast.SequenceNode, error) {
for _, value := range typedNode.Values {
seq, err := n.filterNode(value)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
sequence.Values = append(sequence.Values, seq.Values...)
}
@@ -754,14 +753,14 @@ func (n *recursiveNode) filterNode(node ast.Node) (*ast.SequenceNode, error) {
}
seq, err := n.filterNode(typedNode.Value)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
sequence.Values = append(sequence.Values, seq.Values...)
case *ast.SequenceNode:
for _, value := range typedNode.Values {
seq, err := n.filterNode(value)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
sequence.Values = append(sequence.Values, seq.Values...)
}
@@ -772,7 +771,7 @@ func (n *recursiveNode) filterNode(node ast.Node) (*ast.SequenceNode, error) {
func (n *recursiveNode) filter(node ast.Node) (ast.Node, error) {
sequence, err := n.filterNode(node)
if err != nil {
- return nil, errors.Wrapf(err, "failed to filter")
+ return nil, err
}
sequence.Start = node.GetToken()
return sequence, nil
@@ -783,23 +782,23 @@ func (n *recursiveNode) replaceNode(node ast.Node, target ast.Node) error {
case *ast.MappingNode:
for _, value := range typedNode.Values {
if err := n.replaceNode(value, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
}
case *ast.MappingValueNode:
key := typedNode.Key.GetToken().Value
if n.selector == key {
if err := typedNode.Replace(target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
}
if err := n.replaceNode(typedNode.Value, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
case *ast.SequenceNode:
for _, value := range typedNode.Values {
if err := n.replaceNode(value, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
}
}
@@ -808,7 +807,7 @@ func (n *recursiveNode) replaceNode(node ast.Node, target ast.Node) error {
func (n *recursiveNode) replace(node ast.Node, target ast.Node) error {
if err := n.replaceNode(node, target); err != nil {
- return errors.Wrapf(err, "failed to replace")
+ return err
}
return nil
}
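
Each errors.Wrapf(Err..., msg) in path.go becomes fmt.Errorf with a trailing %w, which keeps the sentinel matchable. A short sketch of the observable behavior (YAMLPaths must start with '$', so this input is invalid):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/goccy/go-yaml"
)

func main() {
	// "foo.bar" lacks the leading '$', so PathString fails.
	_, err := yaml.PathString("foo.bar")
	fmt.Println(err)
	// The sentinel still matches through the %w wrapping, both via the
	// package helper and via the standard library directly.
	fmt.Println(yaml.IsInvalidPathStringError(err))
	fmt.Println(errors.Is(err, yaml.ErrInvalidPathString))
}
```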
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go b/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go
index d5e25dc919..c71481295e 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go
@@ -6,6 +6,7 @@ import (
"strings"
"github.com/fatih/color"
+
"github.com/goccy/go-yaml/ast"
"github.com/goccy/go-yaml/token"
)
@@ -29,6 +30,7 @@ type Printer struct {
Bool PrintFunc
String PrintFunc
Number PrintFunc
+ Comment PrintFunc
}
func defaultLineNumberFormat(num int) string {
@@ -82,6 +84,11 @@ func (p *Printer) property(tk *token.Token) *Property {
return p.Number()
}
return prop
+ case token.CommentType:
+ if p.Comment != nil {
+ return p.Comment()
+ }
+ return prop
default:
}
return prop
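
The new Comment hook follows the same PrintFunc pattern as the existing Bool/String/Number hooks. A hedged sketch of wiring it up, assuming the package's Property struct with Prefix/Suffix fields and its PrintTokens method:

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml/lexer"
	"github.com/goccy/go-yaml/printer"
)

func main() {
	var p printer.Printer
	// Render comments dimmed; Prefix/Suffix wrap each comment token.
	p.Comment = func() *printer.Property {
		return &printer.Property{
			Prefix: "\x1b[2m", // ANSI dim
			Suffix: "\x1b[0m", // reset
		}
	}
	tokens := lexer.Tokenize("a: 1 # the answer\n")
	fmt.Println(p.PrintTokens(tokens))
}
```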
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go
index 3aaec56189..5f92a6ef5a 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go
@@ -42,7 +42,7 @@ func createContext() *Context {
}
func newContext(src []rune) *Context {
- ctx := ctxPool.Get().(*Context)
+ ctx, _ := ctxPool.Get().(*Context)
ctx.reset(src)
return ctx
}
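
The comma-ok assertion on ctxPool.Get() does not change behavior: when a pool's New function always returns *Context, the assertion cannot fail, and the two-result form simply avoids the panicking path that linters such as forcetypeassert flag. A self-contained sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

type scratch struct{ buf []byte }

var pool = sync.Pool{
	// New always returns *scratch, so the assertion in main cannot fail.
	New: func() any { return &scratch{buf: make([]byte, 0, 64)} },
}

func main() {
	// Comma-ok form: a type mismatch would yield (nil, false) instead of
	// panicking, which satisfies the linter without changing behavior.
	s, _ := pool.Get().(*scratch)
	s.buf = append(s.buf[:0], "hello"...)
	fmt.Println(string(s.buf))
	pool.Put(s)
}
```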
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go
index b0eac48d24..a559555a99 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go
@@ -1,11 +1,11 @@
package scanner
import (
+ "errors"
"io"
"strings"
"github.com/goccy/go-yaml/token"
- "golang.org/x/xerrors"
)
// IndentState state for indent
@@ -25,18 +25,24 @@ const (
// Scanner holds the scanner's internal state while processing a given text.
// It can be allocated as part of another data structure but must be initialized via Init before use.
type Scanner struct {
- source []rune
- sourcePos int
- sourceSize int
- line int
- column int
- offset int
- prevIndentLevel int
- prevIndentNum int
- prevIndentColumn int
- docStartColumn int
+ source []rune
+ sourcePos int
+ sourceSize int
+ // line number. This number starts from 1.
+ line int
+ // column number. This number starts from 1.
+ column int
+ // offset represents the offset from the beginning of the source.
+ offset int
+ // lastDelimColumn retains the column of the last delimiter token, used to compare indentation.
+ lastDelimColumn int
+ // indentNum indicates the number of spaces used for indentation.
+ indentNum int
+ // prevLineIndentNum indicates the number of spaces used for indentation on the previous line.
+ prevLineIndentNum int
+ // indentLevel indicates the level of indent depth. This value does not match the column value.
indentLevel int
- indentNum int
+ docStartColumn int
isFirstCharAtLine bool
isAnchor bool
startedFlowSequenceNum int
@@ -90,6 +96,7 @@ func (s *Scanner) progressColumn(ctx *Context, num int) {
}
func (s *Scanner) progressLine(ctx *Context) {
+ s.prevLineIndentNum = s.indentNum
s.column = 1
s.line++
s.offset++
@@ -99,19 +106,6 @@ func (s *Scanner) progressLine(ctx *Context) {
ctx.progress(1)
}
-func (s *Scanner) isNeededKeepPreviousIndentNum(ctx *Context, c rune) bool {
- if !s.isChangedToIndentStateUp() {
- return false
- }
- if ctx.isDocument() {
- return true
- }
- if c == '-' && ctx.existsBuffer() {
- return true
- }
- return false
-}
-
func (s *Scanner) isNewLineChar(c rune) bool {
if c == '\n' {
return true
@@ -140,42 +134,38 @@ func (s *Scanner) newLineCount(src []rune) int {
return cnt
}
-func (s *Scanner) updateIndentState(ctx *Context) {
- indentNumBasedIndentState := s.indentState
- if s.prevIndentNum < s.indentNum {
- s.indentLevel = s.prevIndentLevel + 1
- indentNumBasedIndentState = IndentStateUp
- } else if s.prevIndentNum == s.indentNum {
- s.indentLevel = s.prevIndentLevel
- indentNumBasedIndentState = IndentStateEqual
- } else {
- indentNumBasedIndentState = IndentStateDown
- if s.prevIndentLevel > 0 {
- s.indentLevel = s.prevIndentLevel - 1
+func (s *Scanner) updateIndentLevel() {
+ if s.prevLineIndentNum < s.indentNum {
+ s.indentLevel++
+ } else if s.prevLineIndentNum > s.indentNum {
+ if s.indentLevel > 0 {
+ s.indentLevel--
}
}
+}
- if s.prevIndentColumn > 0 {
- if s.prevIndentColumn < s.column {
+func (s *Scanner) updateIndentState(ctx *Context) {
+ if s.lastDelimColumn > 0 {
+ if s.lastDelimColumn < s.column {
s.indentState = IndentStateUp
- } else if s.prevIndentColumn != s.column || indentNumBasedIndentState != IndentStateEqual {
- // The following case ( current position is 'd' ), some variables becomes like here
- // - prevIndentColumn: 1 of 'a'
- // - indentNumBasedIndentState: IndentStateDown because d's indentNum(1) is less than c's indentNum(3).
- // Therefore, s.prevIndentColumn(1) == s.column(1) is true, but we want to treat this as IndentStateDown.
- // So, we look also current indentState value by the above prevIndentNum based logic, and determins finally indentState.
- // ---
- // a:
- // b
- // c
- // d: e
- // ^
- s.indentState = IndentStateDown
} else {
- s.indentState = IndentStateEqual
+ // If lastDelimColumn and s.column are the same,
+ // treat it as the Down state since it is the same column as the delimiter.
+ s.indentState = IndentStateDown
}
} else {
- s.indentState = indentNumBasedIndentState
+ s.indentState = s.indentStateFromIndentNumDifference()
+ }
+}
+
+func (s *Scanner) indentStateFromIndentNumDifference() IndentState {
+ switch {
+ case s.prevLineIndentNum < s.indentNum:
+ return IndentStateUp
+ case s.prevLineIndentNum == s.indentNum:
+ return IndentStateEqual
+ default:
+ return IndentStateDown
}
}
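
The refactored rule is: if a delimiter column is known, compare columns (the delimiter's own column counts as Down); otherwise classify by comparing the previous line's indent count with the current one. A standalone sketch of that decision, not the vendored code itself:

```go
package main

import "fmt"

type IndentState int

const (
	IndentStateEqual IndentState = iota
	IndentStateUp
	IndentStateDown
)

// classify mirrors the simplified indent rule described above.
func classify(lastDelimColumn, column, prevLineIndentNum, indentNum int) IndentState {
	if lastDelimColumn > 0 {
		if lastDelimColumn < column {
			return IndentStateUp
		}
		// The delimiter's own column (or anything left of it) is Down.
		return IndentStateDown
	}
	switch {
	case prevLineIndentNum < indentNum:
		return IndentStateUp
	case prevLineIndentNum == indentNum:
		return IndentStateEqual
	default:
		return IndentStateDown
	}
}

func main() {
	fmt.Println(classify(3, 5, 0, 0) == IndentStateUp)   // right of the delimiter
	fmt.Println(classify(3, 3, 0, 0) == IndentStateDown) // same column as delimiter
	fmt.Println(classify(0, 1, 2, 4) == IndentStateUp)   // deeper than previous line
}
```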
@@ -191,16 +181,9 @@ func (s *Scanner) updateIndent(ctx *Context, c rune) {
s.indentState = IndentStateKeep
return
}
+ s.updateIndentLevel()
s.updateIndentState(ctx)
s.isFirstCharAtLine = false
- if s.isNeededKeepPreviousIndentNum(ctx, c) {
- return
- }
- if s.indentState != IndentStateUp {
- s.prevIndentColumn = 0
- }
- s.prevIndentNum = s.indentNum
- s.prevIndentLevel = s.indentLevel
}
func (s *Scanner) isChangedToIndentStateDown() bool {
@@ -211,10 +194,6 @@ func (s *Scanner) isChangedToIndentStateUp() bool {
return s.indentState == IndentStateUp
}
-func (s *Scanner) isChangedToIndentStateEqual() bool {
- return s.indentState == IndentStateEqual
-}
-
func (s *Scanner) addBufferedTokenIfExists(ctx *Context) {
ctx.addToken(s.bufferedToken(ctx))
}
@@ -316,100 +295,93 @@ func (s *Scanner) scanDoubleQuote(ctx *Context) (tk *token.Token, pos int) {
continue
} else if c == '\\' {
isFirstLineChar = false
- if idx+1 < size {
- nextChar := src[idx+1]
- switch nextChar {
- case 'b':
- ctx.addOriginBuf(nextChar)
- value = append(value, '\b')
- idx++
- continue
- case 'e':
- ctx.addOriginBuf(nextChar)
- value = append(value, '\x1B')
- idx++
- continue
- case 'f':
- ctx.addOriginBuf(nextChar)
- value = append(value, '\f')
- idx++
- continue
- case 'n':
- ctx.addOriginBuf(nextChar)
- value = append(value, '\n')
- idx++
- continue
- case 'r':
- ctx.addOriginBuf(nextChar)
- value = append(value, '\r')
- idx++
- continue
- case 'v':
- ctx.addOriginBuf(nextChar)
- value = append(value, '\v')
- idx++
- continue
- case 'L': // LS (#x2028)
- ctx.addOriginBuf(nextChar)
- value = append(value, []rune{'\xE2', '\x80', '\xA8'}...)
- idx++
- continue
- case 'N': // NEL (#x85)
- ctx.addOriginBuf(nextChar)
- value = append(value, []rune{'\xC2', '\x85'}...)
- idx++
- continue
- case 'P': // PS (#x2029)
- ctx.addOriginBuf(nextChar)
- value = append(value, []rune{'\xE2', '\x80', '\xA9'}...)
- idx++
- continue
- case '_': // #xA0
- ctx.addOriginBuf(nextChar)
- value = append(value, []rune{'\xC2', '\xA0'}...)
- idx++
- continue
- case '"':
- ctx.addOriginBuf(nextChar)
- value = append(value, nextChar)
- idx++
- continue
- case 'x':
- if idx+3 >= size {
- // TODO: need to return error
- //err = xerrors.New("invalid escape character \\x")
- return
- }
- codeNum := hexRunesToInt(src[idx+2 : idx+4])
- value = append(value, rune(codeNum))
- idx += 3
- continue
- case 'u':
- if idx+5 >= size {
- // TODO: need to return error
- //err = xerrors.New("invalid escape character \\u")
- return
- }
- codeNum := hexRunesToInt(src[idx+2 : idx+6])
- value = append(value, rune(codeNum))
- idx += 5
- continue
- case 'U':
- if idx+9 >= size {
- // TODO: need to return error
- //err = xerrors.New("invalid escape character \\U")
- return
- }
- codeNum := hexRunesToInt(src[idx+2 : idx+10])
- value = append(value, rune(codeNum))
- idx += 9
- continue
- case '\\':
- ctx.addOriginBuf(nextChar)
- idx++
+ if idx+1 >= size {
+ value = append(value, c)
+ continue
+ }
+ nextChar := src[idx+1]
+ progress := 0
+ switch nextChar {
+ case 'b':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\b')
+ case 'e':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\x1B')
+ case 'f':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\f')
+ case 'n':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\n')
+ case 'r':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\r')
+ case 'v':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\v')
+ case 'L': // LS (#x2028)
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, []rune{'\xE2', '\x80', '\xA8'}...)
+ case 'N': // NEL (#x85)
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, []rune{'\xC2', '\x85'}...)
+ case 'P': // PS (#x2029)
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, []rune{'\xE2', '\x80', '\xA9'}...)
+ case '_': // #xA0
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, []rune{'\xC2', '\xA0'}...)
+ case '"':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, nextChar)
+ case 'x':
+ progress = 3
+ if idx+progress >= size {
+ // TODO: need to return error
+ //err = errors.New("invalid escape character \\x")
+ return
+ }
+ codeNum := hexRunesToInt(src[idx+2 : idx+progress+1])
+ value = append(value, rune(codeNum))
+ case 'u':
+ progress = 5
+ if idx+progress >= size {
+ // TODO: need to return error
+ //err = errors.New("invalid escape character \\u")
+ return
+ }
+ codeNum := hexRunesToInt(src[idx+2 : idx+progress+1])
+ value = append(value, rune(codeNum))
+ case 'U':
+ progress = 9
+ if idx+progress >= size {
+ // TODO: need to return error
+ //err = errors.New("invalid escape character \\U")
+ return
}
+ codeNum := hexRunesToInt(src[idx+2 : idx+progress+1])
+ value = append(value, rune(codeNum))
+ case '\\':
+ progress = 1
+ ctx.addOriginBuf(nextChar)
+ value = append(value, c)
+ default:
+ value = append(value, c)
}
- value = append(value, c)
+ idx += progress
+ s.progressColumn(ctx, progress)
continue
} else if c != '"' {
value = append(value, c)
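
The double-quote rewrite replaces per-case idx arithmetic with a single progress counter that is added to idx once and also drives progressColumn, fixing the column drift the old code had for multi-character escapes. A minimal standalone sketch of decoding \uNNNN with the same bounds check (hexRunesToInt here is a stand-in for the scanner helper of the same name):

```go
package main

import "fmt"

// hexRunesToInt converts a run of hex digits to an integer; it stands in
// for the scanner's helper.
func hexRunesToInt(runes []rune) int {
	n := 0
	for _, r := range runes {
		n <<= 4
		switch {
		case r >= '0' && r <= '9':
			n += int(r - '0')
		case r >= 'a' && r <= 'f':
			n += int(r-'a') + 10
		case r >= 'A' && r <= 'F':
			n += int(r-'A') + 10
		}
	}
	return n
}

func main() {
	src := []rune(`\u00e9!`) // raw string: backslash, 'u', four hex digits, '!'
	idx := 0
	var value []rune
	if src[idx] == '\\' && idx+1 < len(src) && src[idx+1] == 'u' {
		progress := 5 // 'u' plus the four hex digits
		if idx+progress < len(src) {
			// src[idx+2 : idx+progress+1] selects exactly the four hex digits.
			value = append(value, rune(hexRunesToInt(src[idx+2:idx+progress+1])))
			idx += progress // single adjustment, mirroring `idx += progress`
		}
	}
	fmt.Println(string(value))                    // é
	fmt.Println("next rune:", string(src[idx+1])) // !
}
```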
@@ -476,7 +448,6 @@ func (s *Scanner) scanComment(ctx *Context) (tk *token.Token, pos int) {
ctx.addOriginBuf('#')
ctx.progress(1) // skip '#' character
for idx, c := range ctx.src[ctx.idx:] {
- pos = idx + 1
ctx.addOriginBuf(c)
switch c {
case '\n', '\r':
@@ -502,7 +473,7 @@ func trimCommentFromLiteralOpt(text string) (string, error) {
return text, nil
}
if idx == 0 {
- return "", xerrors.New("invalid literal header")
+ return "", errors.New("invalid literal header")
}
return text[:idx-1], nil
}
@@ -538,7 +509,225 @@ func (s *Scanner) scanLiteral(ctx *Context, c rune) {
}
}
-func (s *Scanner) scanLiteralHeader(ctx *Context) (pos int, err error) {
+func (s *Scanner) scanNewLine(ctx *Context, c rune) {
+ if len(ctx.buf) > 0 && s.savedPos == nil {
+ s.savedPos = s.pos()
+ s.savedPos.Column -= len(ctx.bufferedSrc())
+ }
+
+ // In the following case, the origin buffer holds two unnecessary trailing spaces,
+ // so `removeRightSpaceFromBuf` removes them and fixes the column number as well.
+ // ---
+ // a:[space][space]
+ // b: c
+ removedNum := ctx.removeRightSpaceFromBuf()
+ if removedNum > 0 {
+ s.column -= removedNum
+ s.offset -= removedNum
+ if s.savedPos != nil {
+ s.savedPos.Column -= removedNum
+ }
+ }
+
+ // It is safe to ignore a CR that is followed by an LF and normalize the pair to a single LF, per the following YAML 1.2 spec.
+ // > Line breaks inside scalar content must be normalized by the YAML processor. Each such line break must be parsed into a single line feed character.
+ // > Outside scalar content, YAML allows any line break to be used to terminate lines.
+ // > -- https://yaml.org/spec/1.2/spec.html
+ if c == '\r' && ctx.nextChar() == '\n' {
+ ctx.addOriginBuf('\r')
+ ctx.progress(1)
+ c = '\n'
+ }
+
+ if ctx.isEOS() {
+ s.addBufferedTokenIfExists(ctx)
+ } else if s.isAnchor {
+ s.addBufferedTokenIfExists(ctx)
+ }
+ ctx.addBuf(' ')
+ ctx.addOriginBuf(c)
+ ctx.isSingleLine = false
+ s.progressLine(ctx)
+}
+
+func (s *Scanner) scanFlowMapStart(ctx *Context) bool {
+ if ctx.existsBuffer() {
+ return false
+ }
+
+ ctx.addOriginBuf('{')
+ ctx.addToken(token.MappingStart(string(ctx.obuf), s.pos()))
+ s.startedFlowMapNum++
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanFlowMapEnd(ctx *Context) bool {
+ if s.startedFlowMapNum <= 0 {
+ return false
+ }
+
+ s.addBufferedTokenIfExists(ctx)
+ ctx.addOriginBuf('}')
+ ctx.addToken(token.MappingEnd(string(ctx.obuf), s.pos()))
+ s.startedFlowMapNum--
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanFlowArrayStart(ctx *Context) bool {
+ if ctx.existsBuffer() {
+ return false
+ }
+
+ ctx.addOriginBuf('[')
+ ctx.addToken(token.SequenceStart(string(ctx.obuf), s.pos()))
+ s.startedFlowSequenceNum++
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanFlowArrayEnd(ctx *Context) bool {
+ if s.startedFlowSequenceNum <= 0 {
+ return false
+ }
+
+ s.addBufferedTokenIfExists(ctx)
+ ctx.addOriginBuf(']')
+ ctx.addToken(token.SequenceEnd(string(ctx.obuf), s.pos()))
+ s.startedFlowSequenceNum--
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanFlowEntry(ctx *Context, c rune) bool {
+ if s.startedFlowSequenceNum <= 0 && s.startedFlowMapNum <= 0 {
+ return false
+ }
+
+ s.addBufferedTokenIfExists(ctx)
+ ctx.addOriginBuf(c)
+ ctx.addToken(token.CollectEntry(string(ctx.obuf), s.pos()))
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanMapDelim(ctx *Context) bool {
+ nc := ctx.nextChar()
+ if s.startedFlowMapNum <= 0 && nc != ' ' && !s.isNewLineChar(nc) && !ctx.isNextEOS() {
+ return false
+ }
+
+ // mapping value
+ tk := s.bufferedToken(ctx)
+ if tk != nil {
+ s.lastDelimColumn = tk.Position.Column
+ ctx.addToken(tk)
+ } else if tk := ctx.lastToken(); tk != nil {
+ // If the map key is quoted, the buffer does not exist because it has already been cut into tokens.
+ // Therefore, we need to check the last token.
+ if tk.Indicator == token.QuotedScalarIndicator {
+ s.lastDelimColumn = tk.Position.Column
+ }
+ }
+ ctx.addToken(token.MappingValue(s.pos()))
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanDocumentStart(ctx *Context) bool {
+ if s.indentNum != 0 {
+ return false
+ }
+ if s.column != 1 {
+ return false
+ }
+ if ctx.repeatNum('-') != 3 {
+ return false
+ }
+
+ s.addBufferedTokenIfExists(ctx)
+ ctx.addToken(token.DocumentHeader(string(ctx.obuf)+"---", s.pos()))
+ s.progressColumn(ctx, 3)
+ return true
+}
+
+func (s *Scanner) scanDocumentEnd(ctx *Context) bool {
+ if s.indentNum != 0 {
+ return false
+ }
+ if s.column != 1 {
+ return false
+ }
+ if ctx.repeatNum('.') != 3 {
+ return false
+ }
+
+ ctx.addToken(token.DocumentEnd(string(ctx.obuf)+"...", s.pos()))
+ s.progressColumn(ctx, 3)
+ return true
+}
+
+func (s *Scanner) scanMergeKey(ctx *Context) bool {
+ if !s.isMergeKey(ctx) {
+ return false
+ }
+
+ s.lastDelimColumn = s.column
+ ctx.addToken(token.MergeKey(string(ctx.obuf)+"<<", s.pos()))
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanRawFoldedChar(ctx *Context) bool {
+ if !ctx.existsBuffer() {
+ return false
+ }
+ if !s.isChangedToIndentStateUp() {
+ return false
+ }
+
+ ctx.isRawFolded = true
+ ctx.addBuf('-')
+ ctx.addOriginBuf('-')
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanSequence(ctx *Context) bool {
+ if ctx.existsBuffer() {
+ return false
+ }
+
+ nc := ctx.nextChar()
+ if nc != ' ' && !s.isNewLineChar(nc) {
+ return false
+ }
+
+ s.addBufferedTokenIfExists(ctx)
+ ctx.addOriginBuf('-')
+ tk := token.SequenceEntry(string(ctx.obuf), s.pos())
+ s.lastDelimColumn = tk.Position.Column
+ ctx.addToken(tk)
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanLiteralHeader(ctx *Context) (bool, error) {
+ if ctx.existsBuffer() {
+ return false, nil
+ }
+
+ progress, err := s.scanLiteralHeaderOption(ctx)
+ if err != nil {
+ return false, err
+ }
+ s.progressColumn(ctx, progress)
+ s.progressLine(ctx)
+ return true, nil
+}
+
+func (s *Scanner) scanLiteralHeaderOption(ctx *Context) (pos int, err error) {
header := ctx.currentChar()
ctx.addOriginBuf(header)
ctx.progress(1) // skip '|' or '>' character
@@ -594,116 +783,117 @@ func (s *Scanner) scanLiteralHeader(ctx *Context) (pos int, err error) {
ctx.literalOpt = opt
return
}
- break
}
}
- err = xerrors.New("invalid literal header")
+ err = errors.New("invalid literal header")
return
}
-func (s *Scanner) scanNewLine(ctx *Context, c rune) {
- if len(ctx.buf) > 0 && s.savedPos == nil {
- s.savedPos = s.pos()
- s.savedPos.Column -= len(ctx.bufferedSrc())
+func (s *Scanner) scanMapKey(ctx *Context) bool {
+ if ctx.existsBuffer() {
+ return false
}
- // if the following case, origin buffer has unnecessary two spaces.
- // So, `removeRightSpaceFromOriginBuf` remove them, also fix column number too.
- // ---
- // a:[space][space]
- // b: c
- removedNum := ctx.removeRightSpaceFromBuf()
- if removedNum > 0 {
- s.column -= removedNum
- s.offset -= removedNum
- if s.savedPos != nil {
- s.savedPos.Column -= removedNum
- }
+ nc := ctx.nextChar()
+ if nc != ' ' {
+ return false
}
- if ctx.isEOS() {
- s.addBufferedTokenIfExists(ctx)
- } else if s.isAnchor {
- s.addBufferedTokenIfExists(ctx)
+ ctx.addToken(token.MappingKey(s.pos()))
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanDirective(ctx *Context) bool {
+ if ctx.existsBuffer() {
+ return false
}
- ctx.addBuf(' ')
- ctx.addOriginBuf(c)
- ctx.isSingleLine = false
- s.progressLine(ctx)
+ if s.indentNum != 0 {
+ return false
+ }
+
+ ctx.addToken(token.Directive(string(ctx.obuf)+"%", s.pos()))
+ s.progressColumn(ctx, 1)
+ return true
+}
+
+func (s *Scanner) scanAnchor(ctx *Context) bool {
+ if ctx.existsBuffer() {
+ return false
+ }
+
+ s.addBufferedTokenIfExists(ctx)
+ ctx.addOriginBuf('&')
+ ctx.addToken(token.Anchor(string(ctx.obuf), s.pos()))
+ s.progressColumn(ctx, 1)
+ s.isAnchor = true
+ return true
+}
+
+func (s *Scanner) scanAlias(ctx *Context) bool {
+ if ctx.existsBuffer() {
+ return false
+ }
+
+ s.addBufferedTokenIfExists(ctx)
+ ctx.addOriginBuf('*')
+ ctx.addToken(token.Alias(string(ctx.obuf), s.pos()))
+ s.progressColumn(ctx, 1)
+ return true
}
func (s *Scanner) scan(ctx *Context) (pos int) {
for ctx.next() {
pos = ctx.nextPos()
c := ctx.currentChar()
+
+ // First, update the IndentState.
+ // If the target character is the first character on a line, IndentState becomes Up, Down, or Equal.
+ // For the second and subsequent characters on a line, the state is Keep.
s.updateIndent(ctx, c)
+
+ // If IndentState is Down, tokens are split, so the buffer accumulated up to that point needs to be cut into a token.
+ if s.isChangedToIndentStateDown() {
+ s.addBufferedTokenIfExists(ctx)
+ }
if ctx.isDocument() {
- if s.isChangedToIndentStateEqual() ||
- s.isChangedToIndentStateDown() {
- s.addBufferedTokenIfExists(ctx)
+ if s.isChangedToIndentStateDown() {
s.breakLiteral(ctx)
} else {
s.scanLiteral(ctx, c)
continue
}
- } else if s.isChangedToIndentStateDown() {
- s.addBufferedTokenIfExists(ctx)
- } else if s.isChangedToIndentStateEqual() {
- // if first character is new line character, buffer expect to raw folded literal
- if len(ctx.obuf) > 0 && s.newLineCount(ctx.obuf) <= 1 {
- // doesn't raw folded literal
- s.addBufferedTokenIfExists(ctx)
- }
}
switch c {
case '{':
- if !ctx.existsBuffer() {
- ctx.addOriginBuf(c)
- ctx.addToken(token.MappingStart(string(ctx.obuf), s.pos()))
- s.startedFlowMapNum++
- s.progressColumn(ctx, 1)
+ if s.scanFlowMapStart(ctx) {
return
}
case '}':
- if !ctx.existsBuffer() || s.startedFlowMapNum > 0 {
- ctx.addToken(s.bufferedToken(ctx))
- ctx.addOriginBuf(c)
- ctx.addToken(token.MappingEnd(string(ctx.obuf), s.pos()))
- s.startedFlowMapNum--
- s.progressColumn(ctx, 1)
+ if s.scanFlowMapEnd(ctx) {
return
}
case '.':
- if s.indentNum == 0 && s.column == 1 && ctx.repeatNum('.') == 3 {
- ctx.addToken(token.DocumentEnd(string(ctx.obuf)+"...", s.pos()))
- s.progressColumn(ctx, 3)
+ if s.scanDocumentEnd(ctx) {
pos += 2
return
}
case '<':
- if s.isMergeKey(ctx) {
- s.prevIndentColumn = s.column
- ctx.addToken(token.MergeKey(string(ctx.obuf)+"<<", s.pos()))
- s.progressColumn(ctx, 1)
+ if s.scanMergeKey(ctx) {
pos++
return
}
case '-':
- if s.indentNum == 0 && s.column == 1 && ctx.repeatNum('-') == 3 {
- s.addBufferedTokenIfExists(ctx)
- ctx.addToken(token.DocumentHeader(string(ctx.obuf)+"---", s.pos()))
- s.progressColumn(ctx, 3)
+ if s.scanDocumentStart(ctx) {
pos += 2
return
}
- if ctx.existsBuffer() && s.isChangedToIndentStateUp() {
- // raw folded
- ctx.isRawFolded = true
- ctx.addBuf(c)
- ctx.addOriginBuf(c)
- s.progressColumn(ctx, 1)
+ if s.scanRawFoldedChar(ctx) {
continue
}
+ if s.scanSequence(ctx) {
+ return
+ }
if ctx.existsBuffer() {
// '-' is literal
ctx.addBuf(c)
@@ -711,69 +901,29 @@ func (s *Scanner) scan(ctx *Context) (pos int) {
s.progressColumn(ctx, 1)
continue
}
- nc := ctx.nextChar()
- if nc == ' ' || s.isNewLineChar(nc) {
- s.addBufferedTokenIfExists(ctx)
- ctx.addOriginBuf(c)
- tk := token.SequenceEntry(string(ctx.obuf), s.pos())
- s.prevIndentColumn = tk.Position.Column
- ctx.addToken(tk)
- s.progressColumn(ctx, 1)
- return
- }
case '[':
- if !ctx.existsBuffer() {
- ctx.addOriginBuf(c)
- ctx.addToken(token.SequenceStart(string(ctx.obuf), s.pos()))
- s.startedFlowSequenceNum++
- s.progressColumn(ctx, 1)
+ if s.scanFlowArrayStart(ctx) {
return
}
case ']':
- if !ctx.existsBuffer() || s.startedFlowSequenceNum > 0 {
- s.addBufferedTokenIfExists(ctx)
- ctx.addOriginBuf(c)
- ctx.addToken(token.SequenceEnd(string(ctx.obuf), s.pos()))
- s.startedFlowSequenceNum--
- s.progressColumn(ctx, 1)
+ if s.scanFlowArrayEnd(ctx) {
return
}
case ',':
- if s.startedFlowSequenceNum > 0 || s.startedFlowMapNum > 0 {
- s.addBufferedTokenIfExists(ctx)
- ctx.addOriginBuf(c)
- ctx.addToken(token.CollectEntry(string(ctx.obuf), s.pos()))
- s.progressColumn(ctx, 1)
+ if s.scanFlowEntry(ctx, c) {
return
}
case ':':
- nc := ctx.nextChar()
- if s.startedFlowMapNum > 0 || nc == ' ' || s.isNewLineChar(nc) || ctx.isNextEOS() {
- // mapping value
- tk := s.bufferedToken(ctx)
- if tk != nil {
- s.prevIndentColumn = tk.Position.Column
- ctx.addToken(tk)
- } else if tk := ctx.lastToken(); tk != nil {
- // If the map key is quote, the buffer does not exist because it has already been cut into tokens.
- // Therefore, we need to check the last token.
- if tk.Indicator == token.QuotedScalarIndicator {
- s.prevIndentColumn = tk.Position.Column
- }
- }
- ctx.addToken(token.MappingValue(s.pos()))
- s.progressColumn(ctx, 1)
+ if s.scanMapDelim(ctx) {
return
}
case '|', '>':
- if !ctx.existsBuffer() {
- progress, err := s.scanLiteralHeader(ctx)
- if err != nil {
- // TODO: returns syntax error object
- return
- }
- s.progressColumn(ctx, progress)
- s.progressLine(ctx)
+ scanned, err := s.scanLiteralHeader(ctx)
+ if err != nil {
+ // TODO: returns syntax error object
+ return
+ }
+ if scanned {
continue
}
case '!':
@@ -788,33 +938,19 @@ func (s *Scanner) scan(ctx *Context) (pos int) {
return
}
case '%':
- if !ctx.existsBuffer() && s.indentNum == 0 {
- ctx.addToken(token.Directive(string(ctx.obuf)+"%", s.pos()))
- s.progressColumn(ctx, 1)
+ if s.scanDirective(ctx) {
return
}
case '?':
- nc := ctx.nextChar()
- if !ctx.existsBuffer() && nc == ' ' {
- ctx.addToken(token.MappingKey(s.pos()))
- s.progressColumn(ctx, 1)
+ if s.scanMapKey(ctx) {
return
}
case '&':
- if !ctx.existsBuffer() {
- s.addBufferedTokenIfExists(ctx)
- ctx.addOriginBuf(c)
- ctx.addToken(token.Anchor(string(ctx.obuf), s.pos()))
- s.progressColumn(ctx, 1)
- s.isAnchor = true
+ if s.scanAnchor(ctx) {
return
}
case '*':
- if !ctx.existsBuffer() {
- s.addBufferedTokenIfExists(ctx)
- ctx.addOriginBuf(c)
- ctx.addToken(token.Alias(string(ctx.obuf), s.pos()))
- s.progressColumn(ctx, 1)
+ if s.scanAlias(ctx) {
return
}
case '#':
@@ -840,15 +976,6 @@ func (s *Scanner) scan(ctx *Context) (pos int) {
return
}
case '\r', '\n':
- // There is no problem that we ignore CR which followed by LF and normalize it to LF, because of following YAML1.2 spec.
- // > Line breaks inside scalar content must be normalized by the YAML processor. Each such line break must be parsed into a single line feed character.
- // > Outside scalar content, YAML allows any line break to be used to terminate lines.
- // > -- https://yaml.org/spec/1.2/spec.html
- if c == '\r' && ctx.nextChar() == '\n' {
- ctx.addOriginBuf('\r')
- ctx.progress(1)
- c = '\n'
- }
s.scanNewLine(ctx, c)
continue
case ' ':
@@ -885,9 +1012,8 @@ func (s *Scanner) Init(text string) {
s.line = 1
s.column = 1
s.offset = 1
- s.prevIndentLevel = 0
- s.prevIndentNum = 0
- s.prevIndentColumn = 0
+ s.prevLineIndentNum = 0
+ s.lastDelimColumn = 0
s.indentLevel = 0
s.indentNum = 0
s.isFirstCharAtLine = true
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/struct.go b/hack/tools/vendor/github.com/goccy/go-yaml/struct.go
index a3da8ddd3c..dae4d650d9 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/struct.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/struct.go
@@ -1,10 +1,9 @@
package yaml
import (
+ "fmt"
"reflect"
"strings"
-
- "golang.org/x/xerrors"
)
const (
@@ -84,11 +83,7 @@ func isIgnoredStructField(field reflect.StructField) bool {
// private field
return true
}
- tag := getTag(field)
- if tag == "-" {
- return true
- }
- return false
+ return getTag(field) == "-"
}
type StructFieldMap map[string]*StructField
@@ -121,7 +116,7 @@ func structFieldMap(structType reflect.Type) (StructFieldMap, error) {
}
structField := structField(field)
if _, exists := renderNameMap[structField.RenderName]; exists {
- return nil, xerrors.Errorf("duplicated struct field name %s", structField.RenderName)
+ return nil, fmt.Errorf("duplicated struct field name %s", structField.RenderName)
}
structFieldMap[structField.FieldName] = structField
renderNameMap[structField.RenderName] = struct{}{}
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go b/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go
index c86caab24a..bc8b531cbf 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go
@@ -339,9 +339,12 @@ func reservedKeywordToken(typ Type, value, org string, pos *Position) *Token {
func init() {
for _, keyword := range reservedNullKeywords {
- reservedKeywordMap[keyword] = func(value, org string, pos *Position) *Token {
+ f := func(value, org string, pos *Position) *Token {
return reservedKeywordToken(NullType, value, org, pos)
}
+
+ reservedKeywordMap[keyword] = f
+ reservedEncKeywordMap[keyword] = f
}
for _, keyword := range reservedBoolKeywords {
f := func(value, org string, pos *Position) *Token {
@@ -623,7 +626,7 @@ func IsNeedQuoted(value string) bool {
}
first := value[0]
switch first {
- case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@', ' ':
+ case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@', ' ', '`':
return true
}
last := value[len(value)-1]
diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go b/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go
index 25b1056fe3..a90560e192 100644
--- a/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go
+++ b/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go
@@ -9,7 +9,6 @@ import (
"github.com/goccy/go-yaml/ast"
"github.com/goccy/go-yaml/internal/errors"
- "golang.org/x/xerrors"
)
// BytesMarshaler interface may be implemented by types to customize their
@@ -80,11 +79,11 @@ func (s MapSlice) ToMap() map[interface{}]interface{} {
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
+// Struct fields are only marshaled if they are exported (have an upper case
+// first letter), and are marshaled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
+// following comma-separated options are used to tweak the marshaling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
@@ -138,7 +137,7 @@ func MarshalWithOptions(v interface{}, opts ...EncodeOption) ([]byte, error) {
func MarshalContext(ctx context.Context, v interface{}, opts ...EncodeOption) ([]byte, error) {
var buf bytes.Buffer
if err := NewEncoder(&buf, opts...).EncodeContext(ctx, v); err != nil {
- return nil, errors.Wrapf(err, "failed to marshal")
+ return nil, err
}
return buf.Bytes(), nil
}
@@ -148,7 +147,7 @@ func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) {
var buf bytes.Buffer
node, err := NewEncoder(&buf, opts...).EncodeToNode(v)
if err != nil {
- return nil, errors.Wrapf(err, "failed to convert value to node")
+ return nil, err
}
return node, nil
}
@@ -161,7 +160,7 @@ func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) {
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
+// used to tweak the marshaling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
@@ -192,7 +191,7 @@ func UnmarshalContext(ctx context.Context, data []byte, v interface{}, opts ...D
if err == io.EOF {
return nil
}
- return errors.Wrapf(err, "failed to unmarshal")
+ return err
}
return nil
}
@@ -201,7 +200,7 @@ func UnmarshalContext(ctx context.Context, data []byte, v interface{}, opts ...D
func NodeToValue(node ast.Node, v interface{}, opts ...DecodeOption) error {
var buf bytes.Buffer
if err := NewDecoder(&buf, opts...).DecodeFromNode(node, v); err != nil {
- return errors.Wrapf(err, "failed to convert node to value")
+ return err
}
return nil
}
@@ -214,9 +213,9 @@ func NodeToValue(node ast.Node, v interface{}, opts ...DecodeOption) error {
// contain snippets of the YAML source that was used.
func FormatError(e error, colored, inclSource bool) string {
var pp errors.PrettyPrinter
- if xerrors.As(e, &pp) {
+ if errors.As(e, &pp) {
var buf bytes.Buffer
- pp.PrettyPrint(&errors.Sink{&buf}, colored, inclSource)
+ pp.PrettyPrint(&errors.Sink{Buffer: &buf}, colored, inclSource)
return buf.String()
}
@@ -227,11 +226,11 @@ func FormatError(e error, colored, inclSource bool) string {
func YAMLToJSON(bytes []byte) ([]byte, error) {
var v interface{}
if err := UnmarshalWithOptions(bytes, &v, UseOrderedMap()); err != nil {
- return nil, errors.Wrapf(err, "failed to unmarshal")
+ return nil, err
}
out, err := MarshalWithOptions(v, JSON())
if err != nil {
- return nil, errors.Wrapf(err, "failed to marshal with json option")
+ return nil, err
}
return out, nil
}
@@ -240,11 +239,11 @@ func YAMLToJSON(bytes []byte) ([]byte, error) {
func JSONToYAML(bytes []byte) ([]byte, error) {
var v interface{}
if err := UnmarshalWithOptions(bytes, &v, UseOrderedMap()); err != nil {
- return nil, errors.Wrapf(err, "failed to unmarshal from json bytes")
+ return nil, err
}
out, err := Marshal(v)
if err != nil {
- return nil, errors.Wrapf(err, "failed to marshal")
+ return nil, err
}
return out, nil
}
diff --git a/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go b/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go
index a69611e1d3..459e872199 100644
--- a/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go
+++ b/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go
@@ -14,6 +14,11 @@ import (
"github.com/golangci/gofmt/gofmt/internal/diff"
)
+type Options struct {
+ NeedSimplify bool
+ RewriteRules []RewriteRule
+}
+
var parserModeMu sync.RWMutex
type RewriteRule struct {
@@ -73,6 +78,32 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule)
return diff.Diff(oldName, src, newName, res), nil
}
+func Source(filename string, src []byte, opts Options) ([]byte, error) {
+ fset := token.NewFileSet()
+
+ parserModeMu.Lock()
+ initParserMode()
+ parserModeMu.Unlock()
+
+ file, sourceAdj, indentAdj, err := parse(fset, filename, src, false)
+ if err != nil {
+ return nil, err
+ }
+
+ file, err = rewriteFileContent(fset, file, opts.RewriteRules)
+ if err != nil {
+ return nil, err
+ }
+
+ ast.SortImports(fset, file)
+
+ if opts.NeedSimplify {
+ simplify(file)
+ }
+
+ return format(fset, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth})
+}
+
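
The new `Source` entry point mirrors `RunRewrite` but works on an in-memory buffer instead of producing a diff. A minimal caller sketch, assuming the `github.com/golangci/gofmt/gofmt` import path and a hypothetical `main.go` on disk:

```go
package main

import (
	"log"
	"os"

	"github.com/golangci/gofmt/gofmt"
)

func main() {
	src, err := os.ReadFile("main.go") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	// Parse, apply rewrite rules, sort imports, optionally simplify, reprint.
	formatted, err := gofmt.Source("main.go", src, gofmt.Options{NeedSimplify: true})
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(formatted)
}
```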
func rewriteFileContent(fset *token.FileSet, file *ast.File, rewriteRules []RewriteRule) (*ast.File, error) {
for _, rewriteRule := range rewriteRules {
pattern, err := parseExpression(rewriteRule.Pattern, "pattern")
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go
index 413e071d65..bf235bf17f 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go
@@ -1,6 +1,7 @@
package main
import (
+ "cmp"
"fmt"
"os"
"runtime/debug"
@@ -63,17 +64,9 @@ func createBuildInfo() commands.BuildInfo {
}
}
- if revision == "" {
- revision = "unknown"
- }
-
- if modified == "" {
- modified = "?"
- }
-
- if info.Date == "" {
- info.Date = "(unknown)"
- }
+ revision = cmp.Or(revision, "unknown")
+ modified = cmp.Or(modified, "?")
+ info.Date = cmp.Or(info.Date, "(unknown)")
info.Commit = fmt.Sprintf("(%s, modified: %s, mod sum: %q)", revision, modified, buildInfo.Main.Sum)
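
`cmp.Or` (standard library, Go 1.22+) returns the first of its arguments that is not the zero value, which is what lets each of the three deleted `if` blocks collapse to a single line. A self-contained sketch of the equivalence:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	revision := ""
	// Equivalent to: if revision == "" { revision = "unknown" }
	revision = cmp.Or(revision, "unknown")
	fmt.Println(revision) // unknown
}
```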
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go
index 7bf4f1d660..85899ebc92 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go
@@ -293,7 +293,7 @@ func GetBytes(c Cache, id ActionID) ([]byte, Entry, error) {
}
data, err := robustio.ReadFile(c.OutputFile(entry.OutputID))
if err != nil {
- return nil, entry, err
+ return nil, entry, &entryNotFoundError{Err: err}
}
if sha256.Sum256(data) != entry.OutputID {
return nil, entry, &entryNotFoundError{Err: errors.New("bad checksum")}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE
new file mode 100644
index 0000000000..2a7cf70da6
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md
new file mode 100644
index 0000000000..4d221d4ca5
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md
@@ -0,0 +1,8 @@
+# analysisflags
+
+Extracted from `/go/analysis/internal/analysisflags` (related to `checker`).
+This is just a copy of the code without any changes.
+
+## History
+
+- sync with https://github.com/golang/tools/blob/v0.28.0
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go
new file mode 100644
index 0000000000..26a917a991
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysisflags
+
+import (
+ "fmt"
+ "net/url"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+// ResolveURL resolves the URL field for a Diagnostic from an Analyzer
+// and returns the URL. See Diagnostic.URL for details.
+func ResolveURL(a *analysis.Analyzer, d analysis.Diagnostic) (string, error) {
+ if d.URL == "" && d.Category == "" && a.URL == "" {
+ return "", nil // do nothing
+ }
+ raw := d.URL
+ if d.URL == "" && d.Category != "" {
+ raw = "#" + d.Category
+ }
+ u, err := url.Parse(raw)
+ if err != nil {
+ return "", fmt.Errorf("invalid Diagnostic.URL %q: %s", raw, err)
+ }
+ base, err := url.Parse(a.URL)
+ if err != nil {
+ return "", fmt.Errorf("invalid Analyzer.URL %q: %s", a.URL, err)
+ }
+ return base.ResolveReference(u).String(), nil
+}
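
A sketch of the resolution rule: a diagnostic that carries only a category becomes a fragment resolved against the analyzer's URL. The analyzer below is illustrative:

```go
a := &analysis.Analyzer{
	Name: "printf",
	URL:  "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf",
}
d := analysis.Diagnostic{Category: "badverb"}

u, err := analysisflags.ResolveURL(a, d)
// err == nil
// u == "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf#badverb"
```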
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go
new file mode 100644
index 0000000000..bb12600dac
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go
@@ -0,0 +1,48 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package analysisinternal provides gopls' internal analyses with a
+// number of helper functions that operate on typed syntax trees.
+package analysisinternal
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+// MakeReadFile returns a simple implementation of the Pass.ReadFile function.
+func MakeReadFile(pass *analysis.Pass) func(filename string) ([]byte, error) {
+ return func(filename string) ([]byte, error) {
+ if err := CheckReadable(pass, filename); err != nil {
+ return nil, err
+ }
+ return os.ReadFile(filename)
+ }
+}
+
+// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass].
+func CheckReadable(pass *analysis.Pass, filename string) error {
+ if slicesContains(pass.OtherFiles, filename) ||
+ slicesContains(pass.IgnoredFiles, filename) {
+ return nil
+ }
+ for _, f := range pass.Files {
+ if pass.Fset.File(f.FileStart).Name() == filename {
+ return nil
+ }
+ }
+ return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename)
+}
+
+// TODO(adonovan): use go1.21 slices.Contains.
+func slicesContains[S ~[]E, E comparable](slice S, x E) bool {
+ for _, elem := range slice {
+ if elem == x {
+ return true
+ }
+ }
+ return false
+}
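
`MakeReadFile` is meant to be installed by a driver so that analyzers read files only through the access policy enforced by `CheckReadable`. A hedged sketch of that wiring (the surrounding driver setup is assumed, not shown in this package):

```go
// pass is assumed to be fully populated by the driver (Fset, Files,
// OtherFiles, IgnoredFiles, ...) before the analyzer runs.
pass.ReadFile = analysisinternal.MakeReadFile(pass)
```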
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md
new file mode 100644
index 0000000000..f301cdbebb
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md
@@ -0,0 +1,8 @@
+# analysisinternal
+
+Extracted from `/internal/analysisinternal/` (related to `checker`).
+This is just a copy of the code without any changes.
+
+## History
+
+- sync with https://github.com/golang/tools/blob/v0.28.0
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go
new file mode 100644
index 0000000000..a13547b7a7
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go
@@ -0,0 +1,176 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff computes differences between text files or strings.
+package diff
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// An Edit describes the replacement of a portion of a text file.
+type Edit struct {
+ Start, End int // byte offsets of the region to replace
+ New string // the replacement
+}
+
+func (e Edit) String() string {
+ return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New)
+}
+
+// Apply applies a sequence of edits to the src buffer and returns the
+// result. Edits are applied in order of start offset; edits with the
+// same start offset are applied in the order they were provided.
+//
+// Apply returns an error if any edit is out of bounds,
+// or if any pair of edits is overlapping.
+func Apply(src string, edits []Edit) (string, error) {
+ edits, size, err := validate(src, edits)
+ if err != nil {
+ return "", err
+ }
+
+ // Apply edits.
+ out := make([]byte, 0, size)
+ lastEnd := 0
+ for _, edit := range edits {
+ if lastEnd < edit.Start {
+ out = append(out, src[lastEnd:edit.Start]...)
+ }
+ out = append(out, edit.New...)
+ lastEnd = edit.End
+ }
+ out = append(out, src[lastEnd:]...)
+
+ if len(out) != size {
+ panic("wrong size")
+ }
+
+ return string(out), nil
+}
+
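A small worked example of how edits compose; this is a vendored internal package, so the snippet is illustrative rather than importable:

```go
src := "Hello world"
edits := []diff.Edit{
	{Start: 0, End: 5, New: "Howdy"}, // replace "Hello"
	{Start: 11, End: 11, New: "!"},   // pure insertion at EOF
}
out, err := diff.Apply(src, edits)
// out == "Howdy world!", err == nil
```
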
+// ApplyBytes is like Apply, but it accepts a byte slice.
+// The result is always a new array.
+func ApplyBytes(src []byte, edits []Edit) ([]byte, error) {
+ res, err := Apply(string(src), edits)
+ return []byte(res), err
+}
+
+// validate checks that edits are consistent with src,
+// and returns the size of the patched output.
+// It may return a different slice.
+func validate(src string, edits []Edit) ([]Edit, int, error) {
+ if !sort.IsSorted(editsSort(edits)) {
+ edits = append([]Edit(nil), edits...)
+ SortEdits(edits)
+ }
+
+ // Check validity of edits and compute final size.
+ size := len(src)
+ lastEnd := 0
+ for _, edit := range edits {
+ if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) {
+ return nil, 0, fmt.Errorf("diff has out-of-bounds edits")
+ }
+ if edit.Start < lastEnd {
+ return nil, 0, fmt.Errorf("diff has overlapping edits")
+ }
+ size += len(edit.New) + edit.Start - edit.End
+ lastEnd = edit.End
+ }
+
+ return edits, size, nil
+}
+
+// SortEdits orders a slice of Edits by (start, end) offset.
+// This ordering puts insertions (end = start) before deletions
+// (end > start) at the same point, but uses a stable sort to preserve
+// the order of multiple insertions at the same point.
+// (Apply detects multiple deletions at the same point as an error.)
+func SortEdits(edits []Edit) {
+ sort.Stable(editsSort(edits))
+}
+
+type editsSort []Edit
+
+func (a editsSort) Len() int { return len(a) }
+func (a editsSort) Less(i, j int) bool {
+ if cmp := a[i].Start - a[j].Start; cmp != 0 {
+ return cmp < 0
+ }
+ return a[i].End < a[j].End
+}
+func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
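The ordering contract is easiest to see with two edits at the same offset: the insertion (End == Start) sorts before the deletion (End > Start). A sketch:

```go
edits := []diff.Edit{
	{Start: 3, End: 5, New: ""},  // deletion at offset 3
	{Start: 3, End: 3, New: "x"}, // insertion at offset 3
}
diff.SortEdits(edits)
// edits[0] is now the insertion, edits[1] the deletion.
```
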
+// lineEdits expands and merges a sequence of edits so that each
+// resulting edit replaces one or more complete lines.
+// See ApplyEdits for preconditions.
+func lineEdits(src string, edits []Edit) ([]Edit, error) {
+ edits, _, err := validate(src, edits)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do all deletions begin and end at the start of a line,
+ // and all insertions end with a newline?
+ // (This is merely a fast path.)
+ for _, edit := range edits {
+ if edit.Start >= len(src) || // insertion at EOF
+ edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start
+ edit.End > 0 && src[edit.End-1] != '\n' || // not at line start
+ edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert
+ goto expand // slow path
+ }
+ }
+ return edits, nil // aligned
+
+expand:
+ if len(edits) == 0 {
+ return edits, nil // no edits (unreachable due to fast path)
+ }
+ expanded := make([]Edit, 0, len(edits)) // a guess
+ prev := edits[0]
+ // TODO(adonovan): opt: start from the first misaligned edit.
+ // TODO(adonovan): opt: avoid quadratic cost of string += string.
+ for _, edit := range edits[1:] {
+ between := src[prev.End:edit.Start]
+ if !strings.Contains(between, "\n") {
+ // overlapping lines: combine with previous edit.
+ prev.New += between + edit.New
+ prev.End = edit.End
+ } else {
+ // non-overlapping lines: flush previous edit.
+ expanded = append(expanded, expandEdit(prev, src))
+ prev = edit
+ }
+ }
+ return append(expanded, expandEdit(prev, src)), nil // flush final edit
+}
+
+// expandEdit returns edit expanded to complete whole lines.
+func expandEdit(edit Edit, src string) Edit {
+ // Expand start left to start of line.
+ // (delta is the zero-based column number of start.)
+ start := edit.Start
+ if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
+ edit.Start -= delta
+ edit.New = src[start-delta:start] + edit.New
+ }
+
+ // Expand end right to end of line.
+ end := edit.End
+ if end > 0 && src[end-1] != '\n' ||
+ edit.New != "" && edit.New[len(edit.New)-1] != '\n' {
+ if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
+ edit.End = len(src) // extend to EOF
+ } else {
+ edit.End = end + nl + 1 // extend beyond \n
+ }
+ }
+ edit.New += src[end:edit.End]
+
+ return edit
+}
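
To make the expansion concrete, here is what `expandEdit` computes for an edit touching only part of a line (values traced by hand from the code above; the function is unexported, so this is a sketch, not runnable against the package):

```go
src := "abc\ndef\n"
// An edit replacing only "c" (offsets 2..3) with "X"
// widens to cover the whole first line:
//   expandEdit(Edit{Start: 2, End: 3, New: "X"}, src)
//     == Edit{Start: 0, End: 4, New: "abX\n"}
```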
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go
new file mode 100644
index 0000000000..c3e82dd268
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "log"
+ "sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+ X, Y int
+ Len int
+}
+
+// sort sorts in place, by lowest X, and if tied, inversely by Len
+func (l lcs) sort() lcs {
+ sort.Slice(l, func(i, j int) bool {
+ if l[i].X != l[j].X {
+ return l[i].X < l[j].X
+ }
+ return l[i].Len > l[j].Len
+ })
+ return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+ for i := 1; i < len(l); i++ {
+ if l[i-1].X+l[i-1].Len > l[i].X {
+ return false
+ }
+ if l[i-1].Y+l[i-1].Len > l[i].Y {
+ return false
+ }
+ }
+ return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+ // from the set of diagonals in l, find a maximal non-conflicting set
+ // this problem may be NP-complete, but we use a greedy heuristic,
+ // which is quadratic, but with a better data structure, could be D log D.
+ // independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
+ // which has to have monotone x and y
+ if len(l) == 0 {
+ return nil
+ }
+ sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
+ tmp := make(lcs, 0, len(l))
+ tmp = append(tmp, l[0])
+ for i := 1; i < len(l); i++ {
+ var dir direction
+ nxt := l[i]
+ for _, in := range tmp {
+ if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
+ break
+ }
+ }
+ if nxt.Len > 0 && dir != bad {
+ tmp = append(tmp, nxt)
+ }
+ }
+ tmp.sort()
+ if false && !tmp.valid() { // debug checking
+ log.Fatalf("here %d", len(tmp))
+ }
+ return tmp
+}
+
+type direction int
+
+const (
+ empty direction = iota // diag is empty (so not in lcs)
+ leftdown // proposed acceptably to the left and below
+ rightup // proposed diag is acceptably to the right and above
+ bad // proposed diag is inconsistent with the lcs so far
+)
+
+// overlap trims the proposed diag prop so it doesn't overlap with
+// the existing diag that has already been added to the lcs.
+func overlap(exist, prop diag) (direction, diag) {
+ if prop.X <= exist.X && exist.X < prop.X+prop.Len {
+ // remove the end of prop where it overlaps with the X end of exist
+ delta := prop.X + prop.Len - exist.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.X <= prop.X && prop.X < exist.X+exist.Len {
+ // remove the beginning of prop where overlaps with exist
+ delta := exist.X + exist.Len - prop.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta
+ prop.Y += delta
+ }
+ if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
+ // remove the end of prop that overlaps (in Y) with exist
+ delta := prop.Y + prop.Len - exist.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
+ // remove the beginning of prop that overlaps with exist
+ delta := exist.Y + exist.Len - prop.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta // no test reaches this code
+ prop.Y += delta
+ }
+ if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
+ return leftdown, prop
+ }
+ if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
+ return rightup, prop
+ }
+ // prop can't be in an lcs that contains exist
+ return bad, prop
+}
+
+// manipulating Diag and lcs
+
+// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
+// or to its first Diag. prepend is only called to extend diagonals
+// in the backward direction.
+func (lcs lcs) prepend(x, y int) lcs {
+ if len(lcs) > 0 {
+ d := &lcs[0]
+ if int(d.X) == x+1 && int(d.Y) == y+1 {
+ // extend the diagonal down and to the left
+ d.X, d.Y = int(x), int(y)
+ d.Len++
+ return lcs
+ }
+ }
+
+ r := diag{X: int(x), Y: int(y), Len: 1}
+ lcs = append([]diag{r}, lcs...)
+ return lcs
+}
+
+// append appends a diagonal, or extends the existing one,
+// by adding the edge (x,y)-(x+1,y+1). append is only called
+// to extend diagonals in the forward direction.
+func (lcs lcs) append(x, y int) lcs {
+ if len(lcs) > 0 {
+ last := &lcs[len(lcs)-1]
+ // Expand last element if adjoining.
+ if last.X+last.Len == x && last.Y+last.Len == y {
+ last.Len++
+ return lcs
+ }
+ }
+
+ return append(lcs, diag{X: x, Y: y, Len: 1})
+}
+
+// enforce constraint on d, k
+func ok(d, k int) bool {
+ return d >= 0 && -d <= k && k <= d
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go
new file mode 100644
index 0000000000..9029dd20b3
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lcs contains code to find longest-common-subsequences
+// (and diffs).
+package lcs
+
+/*
+Compute longest-common-subsequences of two slices A, B using
+algorithms from Myers' paper. A longest-common-subsequence
+(LCS from now on) of A and B is a maximal set of lexically increasing
+pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but
+they all have the same length. An LCS determines a sequence of edits
+that changes A into B.
+
+The key concept is the edit graph of A and B.
+If A has length N and B has length M, then the edit graph has
+vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a
+horizontal edge from v[i][j] to v[i+1][j] whenever both are in
+the graph, and a vertical edge from v[i][j] to v[i][j+1] similarly.
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba"
+ ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6)
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2)
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical
+to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected,
+there are 4 non-diagonal steps, and the diagonals form an LCS.
+
+There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon):
+A:"aabbaa", B:"aacaba"
+ ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙
+ b | | | ____/‾‾‾ | ____/‾‾‾ | | |
+ ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙
+ c | | | | | | |
+ ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙
+ a a b b a a
+
+Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the
+front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short.
+We want to control how big D can be, by stopping when it gets too large. The forward algorithm then
+privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable
+asymmetry.
+
+Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in
+the edit graph look like.
+A:"aabbaa", B:"aacaba"
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙
+ b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | |
+ ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙
+ c | | | | | | |
+ ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a a b b a a
+
+The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion
+is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same
+diagonal. (Here the edit graph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward
+2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path.
+Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the
+computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed
+from the edit graph. The implementation looks for a number of special cases to try to avoid computing an extra forward path.
+
+If the two-sided algorithm has to stop early (because D has become too large) it will have found a forward LCS and a
+backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two
+computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS
+is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two
+to form a best-effort LCS. In the worst case the forward partial LCS may have to
+be recomputed.
+*/
+
+/* Eugene Myers' paper is titled
+"An O(ND) Difference Algorithm and Its Variations"
+and can be found at
+http://www.xmailserver.org/diff2.pdf
+
+(There is a generic implementation of the algorithm in the repository with git hash
+b9ad7e4ade3a686d608e44475390ad428e60e7fc)
+*/
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh
new file mode 100644
index 0000000000..b25ba4aac7
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Creates a zip file containing all numbered versions
+# of the commit history of a large source file, for use
+# as input data for the tests of the diff algorithm.
+#
+# Run script from root of the x/tools repo.
+
+set -eu
+
+# WARNING: This script will install the latest version of $file
+# The largest real source file in the x/tools repo.
+# file=internal/golang/completion/completion.go
+# file=internal/golang/diagnostics.go
+file=internal/protocol/tsprotocol.go
+
+tmp=$(mktemp -d)
+git log $file |
+ awk '/^commit / {print $2}' |
+ nl -ba -nrz |
+ while read n hash; do
+ git checkout --quiet $hash $file
+ cp -f $file $tmp/$n
+ done
+(cd $tmp && zip -q - *) > testdata.zip
+rm -fr $tmp
+git restore --staged $file
+git restore $file
+echo "Created testdata.zip"
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go
new file mode 100644
index 0000000000..504913d1da
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "fmt"
+)
+
+// For each D, vec[D] has length D+1,
+// and the label for (D, k) is stored in vec[D][(D+k)/2].
+type label struct {
+ vec [][]int
+}
+
+// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE
+const debug = false
+
+// debugging. check that the (d,k) pair is valid
+// (that is, -d<=k<=d and d+k even)
+func checkDK(D, k int) {
+ if k >= -D && k <= D && (D+k)%2 == 0 {
+ return
+ }
+ panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k))
+}
+
+func (t *label) set(D, k, x int) {
+ if debug {
+ checkDK(D, k)
+ }
+ for len(t.vec) <= D {
+ t.vec = append(t.vec, nil)
+ }
+ if t.vec[D] == nil {
+ t.vec[D] = make([]int, D+1)
+ }
+ t.vec[D][(D+k)/2] = x // known that D+k is even
+}
+
+func (t *label) get(d, k int) int {
+ if debug {
+ checkDK(d, k)
+ }
+ return int(t.vec[d][(d+k)/2])
+}
+
+func newtriang(limit int) label {
+ if limit < 100 {
+ // Preallocate if limit is not large.
+ return label{vec: make([][]int, limit)}
+ }
+ return label{}
+}
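
The packing invariant at the top of the file, namely that the label for (D, k) lives at `vec[D][(D+k)/2]` because D+k is always even, can be checked by hand:

```go
var t label
t.set(2, -2, 5) // stored at vec[2][0]
t.set(2, 0, 7)  // stored at vec[2][1]
t.set(2, 2, 9)  // stored at vec[2][2]
_ = t.get(2, 0) // returns 7
```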
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go
new file mode 100644
index 0000000000..4353da15ba
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go
@@ -0,0 +1,480 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+// TODO(adonovan): remove unclear references to "old" in this package.
+
+import (
+ "fmt"
+)
+
+// A Diff is a replacement of a portion of A by a portion of B.
+type Diff struct {
+ Start, End int // offsets of portion to delete in A
+ ReplStart, ReplEnd int // offset of replacement text in B
+}
+
+// DiffStrings returns the differences between two strings.
+// It does not respect rune boundaries.
+func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) }
+
+// DiffBytes returns the differences between two byte sequences.
+// It does not respect rune boundaries.
+func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) }
+
+// DiffRunes returns the differences between two rune sequences.
+func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) }
+
+func diff(seqs sequences) []Diff {
+ // A limit on how deeply the LCS algorithm should search. The value is just a guess.
+ const maxDiffs = 100
+ diff, _ := compute(seqs, twosided, maxDiffs/2)
+ return diff
+}
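
A quick sanity check of the exported entry points, using an input whose answer is forced by the common prefix "ab" and suffix "def" (internal package, so illustrative only):

```go
ds := lcs.DiffStrings("abcdef", "abXdef")
// Expected: a single Diff replacing a[2:3] ("c") with b[2:3] ("X"):
//   []Diff{{Start: 2, End: 3, ReplStart: 2, ReplEnd: 3}}
fmt.Println(ds)
```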
+
+// compute computes the list of differences between two sequences,
+// along with the LCS. It is exercised directly by tests.
+// The algorithm is one of {forward, backward, twosided}.
+func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) {
+ if limit <= 0 {
+ limit = 1 << 25 // effectively infinity
+ }
+ alen, blen := seqs.lengths()
+ g := &editGraph{
+ seqs: seqs,
+ vf: newtriang(limit),
+ vb: newtriang(limit),
+ limit: limit,
+ ux: alen,
+ uy: blen,
+ delta: alen - blen,
+ }
+ lcs := algo(g)
+ diffs := lcs.toDiffs(alen, blen)
+ return diffs, lcs
+}
+
+// editGraph carries the information for computing the lcs of two sequences.
+type editGraph struct {
+ seqs sequences
+ vf, vb label // forward and backward labels
+
+ limit int // maximal value of D
+ // the bounding rectangle of the current edit graph
+ lx, ly, ux, uy int
+ delta int // common subexpression: (ux-lx)-(uy-ly)
+}
+
+// toDiffs converts an LCS to a list of edits.
+func (lcs lcs) toDiffs(alen, blen int) []Diff {
+ var diffs []Diff
+ var pa, pb int // offsets in a, b
+ for _, l := range lcs {
+ if pa < l.X || pb < l.Y {
+ diffs = append(diffs, Diff{pa, l.X, pb, l.Y})
+ }
+ pa = l.X + l.Len
+ pb = l.Y + l.Len
+ }
+ if pa < alen || pb < blen {
+ diffs = append(diffs, Diff{pa, alen, pb, blen})
+ }
+ return diffs
+}
+
+// --- FORWARD ---
+
+// fdone decides if the forward path has reached the upper right
+// corner of the rectangle. If so, it also returns the computed lcs.
+func (e *editGraph) fdone(D, k int) (bool, lcs) {
+ // x, y, k are relative to the rectangle
+ x := e.vf.get(D, k)
+ y := x - k
+ if x == e.ux && y == e.uy {
+ return true, e.forwardlcs(D, k)
+ }
+ return false, nil
+}
+
+// run the forward algorithm, until success or up to the limit on D.
+func forward(e *editGraph) lcs {
+ e.setForward(0, 0, e.lx)
+ if ok, ans := e.fdone(0, 0); ok {
+ return ans
+ }
+ // from D to D+1
+ for D := 0; D < e.limit; D++ {
+ e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+ if ok, ans := e.fdone(D+1, -(D + 1)); ok {
+ return ans
+ }
+ e.setForward(D+1, D+1, e.getForward(D, D)+1)
+ if ok, ans := e.fdone(D+1, D+1); ok {
+ return ans
+ }
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get backwards
+ lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+ lookh := e.lookForward(k, e.getForward(D, k+1))
+ if lookv > lookh {
+ e.setForward(D+1, k, lookv)
+ } else {
+ e.setForward(D+1, k, lookh)
+ }
+ if ok, ans := e.fdone(D+1, k); ok {
+ return ans
+ }
+ }
+ }
+ // D is too large
+ // find the D path with maximal x+y inside the rectangle and
+ // use that to compute the found part of the lcs
+ kmax := -e.limit - 1
+ diagmax := -1
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getForward(e.limit, k)
+ y := x - k
+ if x+y > diagmax && x <= e.ux && y <= e.uy {
+ diagmax, kmax = x+y, k
+ }
+ }
+ return e.forwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking from the farthest point reached
+func (e *editGraph) forwardlcs(D, k int) lcs {
+ var ans lcs
+ for x := e.getForward(D, k); x != 0 || x-k != 0; {
+ if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) {
+ // if (x-1,y) is labelled D-1, x--,D--,k--,continue
+ D, k, x = D-1, k-1, x-1
+ continue
+ } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) {
+ // if (x,y-1) is labelled D-1, x, D--,k++, continue
+ D, k = D-1, k+1
+ continue
+ }
+ // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue
+ y := x - k
+ ans = ans.prepend(x+e.lx-1, y+e.ly-1)
+ x--
+ }
+ return ans
+}
+
+// start at (x,y), go up the diagonal as far as possible,
+// and label the result with d
+func (e *editGraph) lookForward(k, relx int) int {
+ rely := relx - k
+ x, y := relx+e.lx, rely+e.ly
+ if x < e.ux && y < e.uy {
+ x += e.seqs.commonPrefixLen(x, e.ux, y, e.uy)
+ }
+ return x
+}
+
+func (e *editGraph) setForward(d, k, relx int) {
+ x := e.lookForward(k, relx)
+ e.vf.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getForward(d, k int) int {
+ x := e.vf.get(d, k)
+ return x
+}
+
+// --- BACKWARD ---
+
+// bdone decides if the backward path has reached the lower left corner
+func (e *editGraph) bdone(D, k int) (bool, lcs) {
+ // x, y, k are relative to the rectangle
+ x := e.vb.get(D, k)
+ y := x - (k + e.delta)
+ if x == 0 && y == 0 {
+ return true, e.backwardlcs(D, k)
+ }
+ return false, nil
+}
+
+// run the backward algorithm, until success or up to the limit on D.
+func backward(e *editGraph) lcs {
+ e.setBackward(0, 0, e.ux)
+ if ok, ans := e.bdone(0, 0); ok {
+ return ans
+ }
+ // from D to D+1
+ for D := 0; D < e.limit; D++ {
+ e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+ if ok, ans := e.bdone(D+1, -(D + 1)); ok {
+ return ans
+ }
+ e.setBackward(D+1, D+1, e.getBackward(D, D))
+ if ok, ans := e.bdone(D+1, D+1); ok {
+ return ans
+ }
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get wrong
+ lookv := e.lookBackward(k, e.getBackward(D, k-1))
+ lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+ if lookv < lookh {
+ e.setBackward(D+1, k, lookv)
+ } else {
+ e.setBackward(D+1, k, lookh)
+ }
+ if ok, ans := e.bdone(D+1, k); ok {
+ return ans
+ }
+ }
+ }
+
+ // D is too large
+ // find the D path with minimal x+y inside the rectangle and
+ // use that to compute the part of the lcs found
+ kmax := -e.limit - 1
+ diagmin := 1 << 25
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getBackward(e.limit, k)
+ y := x - (k + e.delta)
+ if x+y < diagmin && x >= 0 && y >= 0 {
+ diagmin, kmax = x+y, k
+ }
+ }
+ if kmax < -e.limit {
+ panic(fmt.Sprintf("no paths when limit=%d?", e.limit))
+ }
+ return e.backwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking
+func (e *editGraph) backwardlcs(D, k int) lcs {
+ var ans lcs
+ for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; {
+ if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) {
+ // D--, k--, x unchanged
+ D, k = D-1, k-1
+ continue
+ } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) {
+ // D--, k++, x++
+ D, k, x = D-1, k+1, x+1
+ continue
+ }
+ y := x - (k + e.delta)
+ ans = ans.append(x+e.lx, y+e.ly)
+ x++
+ }
+ return ans
+}
+
+// start at (x,y), go down the diagonal as far as possible,
+func (e *editGraph) lookBackward(k, relx int) int {
+ rely := relx - (k + e.delta) // forward k = k + e.delta
+ x, y := relx+e.lx, rely+e.ly
+ if x > 0 && y > 0 {
+ x -= e.seqs.commonSuffixLen(0, x, 0, y)
+ }
+ return x
+}
+
+// convert to rectangle, and label the result with d
+func (e *editGraph) setBackward(d, k, relx int) {
+ x := e.lookBackward(k, relx)
+ e.vb.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getBackward(d, k int) int {
+ x := e.vb.get(d, k)
+ return x
+}
+
+// -- TWOSIDED ---
+
+func twosided(e *editGraph) lcs {
+ // The termination condition could be improved, as either the forward
+ // or backward pass could succeed before Myers' Lemma applies.
+ // Aside from questions of efficiency (is the extra testing cost-effective?),
+ // this is more likely to matter when e.limit is reached.
+ e.setForward(0, 0, e.lx)
+ e.setBackward(0, 0, e.ux)
+
+ // from D to D+1
+ for D := 0; D < e.limit; D++ {
+ // just finished a backwards pass, so check
+ if got, ok := e.twoDone(D, D); ok {
+ return e.twolcs(D, D, got)
+ }
+ // do a forwards pass (D to D+1)
+ e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+ e.setForward(D+1, D+1, e.getForward(D, D)+1)
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get backwards
+ lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+ lookh := e.lookForward(k, e.getForward(D, k+1))
+ if lookv > lookh {
+ e.setForward(D+1, k, lookv)
+ } else {
+ e.setForward(D+1, k, lookh)
+ }
+ }
+ // just did a forward pass, so check
+ if got, ok := e.twoDone(D+1, D); ok {
+ return e.twolcs(D+1, D, got)
+ }
+ // do a backward pass, D to D+1
+ e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+ e.setBackward(D+1, D+1, e.getBackward(D, D))
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get wrong
+ lookv := e.lookBackward(k, e.getBackward(D, k-1))
+ lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+ if lookv < lookh {
+ e.setBackward(D+1, k, lookv)
+ } else {
+ e.setBackward(D+1, k, lookh)
+ }
+ }
+ }
+
+ // D too large. combine a forward and backward partial lcs
+ // first, a forward one
+ kmax := -e.limit - 1
+ diagmax := -1
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getForward(e.limit, k)
+ y := x - k
+ if x+y > diagmax && x <= e.ux && y <= e.uy {
+ diagmax, kmax = x+y, k
+ }
+ }
+ if kmax < -e.limit {
+ panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit))
+ }
+ lcs := e.forwardlcs(e.limit, kmax)
+ // now a backward one
+ // find the D path with minimal x+y inside the rectangle and
+ // use that to compute the lcs
+ diagmin := 1 << 25 // infinity
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getBackward(e.limit, k)
+ y := x - (k + e.delta)
+ if x+y < diagmin && x >= 0 && y >= 0 {
+ diagmin, kmax = x+y, k
+ }
+ }
+ if kmax < -e.limit {
+ panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit))
+ }
+ lcs = append(lcs, e.backwardlcs(e.limit, kmax)...)
+ // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs)
+ ans := lcs.fix()
+ return ans
+}
+
+// Does Myers' Lemma apply?
+func (e *editGraph) twoDone(df, db int) (int, bool) {
+ if (df+db+e.delta)%2 != 0 {
+ return 0, false // diagonals cannot overlap
+ }
+ kmin := -db + e.delta
+ if -df > kmin {
+ kmin = -df
+ }
+ kmax := db + e.delta
+ if df < kmax {
+ kmax = df
+ }
+ for k := kmin; k <= kmax; k += 2 {
+ x := e.vf.get(df, k)
+ u := e.vb.get(db, k-e.delta)
+ if u <= x {
+ // is it worth looking at all the other k?
+ for l := k; l <= kmax; l += 2 {
+ x := e.vf.get(df, l)
+ y := x - l
+ u := e.vb.get(db, l-e.delta)
+ v := u - l
+ if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux {
+ return l, true
+ }
+ }
+ return k, true
+ }
+ }
+ return 0, false
+}
+
+func (e *editGraph) twolcs(df, db, kf int) lcs {
+ // db==df || db+1==df
+ x := e.vf.get(df, kf)
+ y := x - kf
+ kb := kf - e.delta
+ u := e.vb.get(db, kb)
+ v := u - kf
+
+ // Myers proved there is a df-path from (0,0) to (u,v)
+ // and a db-path from (x,y) to (N,M).
+ // In the first case the overall path is the forward path
+ // to (u,v) followed by the backward path to (N,M).
+ // In the second case the path is the backward path to (x,y)
+ // followed by the forward path to (x,y) from (0,0).
+
+ // Look for some special cases to avoid computing either of these paths.
+ if x == u {
+ // "babaab" "cccaba"
+ // already patched together
+ lcs := e.forwardlcs(df, kf)
+ lcs = append(lcs, e.backwardlcs(db, kb)...)
+ return lcs.sort()
+ }
+
+ // is (u-1,v) or (u,v-1) labelled df-1?
+ // if so, that forward df-1-path plus a horizontal or vertical edge
+ // is the df-path to (u,v), then plus the db-path to (N,M)
+ if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 {
+ // "aabbab" "cbcabc"
+ lcs := e.forwardlcs(df-1, u-1-v)
+ lcs = append(lcs, e.backwardlcs(db, kb)...)
+ return lcs.sort()
+ }
+ if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u {
+ // "abaabb" "bcacab"
+ lcs := e.forwardlcs(df-1, u-(v-1))
+ lcs = append(lcs, e.backwardlcs(db, kb)...)
+ return lcs.sort()
+ }
+
+ // The path can't possibly contribute to the lcs because it
+ // is all horizontal or vertical edges
+ if u == 0 || v == 0 || x == e.ux || y == e.uy {
+ // "abaabb" "abaaaa"
+ if u == 0 || v == 0 {
+ return e.backwardlcs(db, kb)
+ }
+ return e.forwardlcs(df, kf)
+ }
+
+ // is (x+1,y) or (x,y+1) labelled db-1?
+ if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 {
+ // "bababb" "baaabb"
+ lcs := e.backwardlcs(db-1, kb+1)
+ lcs = append(lcs, e.forwardlcs(df, kf)...)
+ return lcs.sort()
+ }
+ if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x {
+ // "abbbaa" "cabacc"
+ lcs := e.backwardlcs(db-1, kb-1)
+ lcs = append(lcs, e.forwardlcs(df, kf)...)
+ return lcs.sort()
+ }
+
+ // need to compute another path
+ // "aabbaa" "aacaba"
+ lcs := e.backwardlcs(db, kb)
+ oldx, oldy := e.ux, e.uy
+ e.ux = u
+ e.uy = v
+ lcs = append(lcs, forward(e)...)
+ e.ux, e.uy = oldx, oldy
+ return lcs.sort()
+}
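The two panics above guard the combination step of Myers' bidirectional search: a forward D-path search from (0,0) meets a backward one from (N,M). As a point of reference (not the vendored implementation), a self-contained, forward-only sketch of the underlying O(ND) search shows the per-diagonal bookkeeping that `getForward`/`setForward` wrap; `limit` plays the role of `e.limit`:

```go
package main

import "fmt"

// myersDistance returns the length of the shortest edit script
// (insertions plus deletions) turning a into b, using the forward
// D-path search from Myers' 1986 paper. v[k+limit] records the
// furthest x reached on diagonal k, the same quantity the vendored
// getForward/setForward accessors track per (D, k).
func myersDistance(a, b string) int {
	n, m := len(a), len(b)
	limit := n + m
	v := make([]int, 2*limit+2) // offset by limit so negative diagonals index safely
	for d := 0; d <= limit; d++ {
		for k := -d; k <= d; k += 2 {
			var x int
			if k == -d || (k != d && v[k-1+limit] < v[k+1+limit]) {
				x = v[k+1+limit] // step down from diagonal k+1
			} else {
				x = v[k-1+limit] + 1 // step right from diagonal k-1
			}
			y := x - k
			for x < n && y < m && a[x] == b[y] {
				x++ // follow the "snake" of matching elements
				y++
			}
			v[k+limit] = x
			if x >= n && y >= m {
				return d
			}
		}
	}
	return limit
}

func main() {
	// "babaab"/"cccaba" is one of the test pairs quoted in the comments above.
	fmt.Println(myersDistance("babaab", "cccaba")) // 6
}
```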
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go
new file mode 100644
index 0000000000..2d72d26304
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go
@@ -0,0 +1,113 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+// This file defines the abstract sequence over which the LCS algorithm operates.
+
+// sequences abstracts a pair of sequences, A and B.
+type sequences interface {
+ lengths() (int, int) // len(A), len(B)
+ commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj]))
+ commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj]))
+}
+
+type stringSeqs struct{ a, b string }
+
+func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) }
+func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int {
+ return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj])
+}
+func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int {
+ return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj])
+}
+
+// The explicit capacity in s[i:j:j] leads to more efficient code.
+
+type bytesSeqs struct{ a, b []byte }
+
+func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) }
+func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int {
+ return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int {
+ return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+
+type runesSeqs struct{ a, b []rune }
+
+func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) }
+func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int {
+ return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int {
+ return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+
+// TODO(adonovan): optimize these functions using ideas from:
+// - https://go.dev/cl/408116 common.go
+// - https://go.dev/cl/421435 xor_generic.go
+
+// TODO(adonovan): factor using generics when available,
+// but measure performance impact.
+
+// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj].
+func commonPrefixLenBytes(a, b []byte) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+func commonPrefixLenRunes(a, b []rune) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+func commonPrefixLenString(a, b string) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj].
+func commonSuffixLenBytes(a, b []byte) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[len(a)-1-i] == b[len(b)-1-i] {
+ i++
+ }
+ return i
+}
+func commonSuffixLenRunes(a, b []rune) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[len(a)-1-i] == b[len(b)-1-i] {
+ i++
+ }
+ return i
+}
+func commonSuffixLenString(a, b string) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[len(a)-1-i] == b[len(b)-1-i] {
+ i++
+ }
+ return i
+}
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ } else {
+ return y
+ }
+}
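The usual reason to expose `commonPrefixLen`/`commonSuffixLen` on an abstraction like this is to shrink the edit graph before the quadratic search runs. How the package's entry points actually use these methods is not visible in this hunk, so the following trimming sketch is an assumption built on the same method shapes:

```go
package main

import "fmt"

// strSeqs is a minimal stand-in for the stringSeqs type above.
type strSeqs struct{ a, b string }

func (s strSeqs) lengths() (int, int) { return len(s.a), len(s.b) }
func (s strSeqs) commonPrefixLen(ai, aj, bi, bj int) int {
	a, b := s.a[ai:aj], s.b[bi:bj]
	i := 0
	for i < len(a) && i < len(b) && a[i] == b[i] {
		i++
	}
	return i
}
func (s strSeqs) commonSuffixLen(ai, aj, bi, bj int) int {
	a, b := s.a[ai:aj], s.b[bi:bj]
	i := 0
	for i < len(a) && i < len(b) && a[len(a)-1-i] == b[len(b)-1-i] {
		i++
	}
	return i
}

// trim strips the common prefix and suffix, returning the window
// [lo:ahi) of a and [lo:bhi) of b that the core search must still cover.
func trim(s strSeqs) (lo, ahi, bhi int) {
	na, nb := s.lengths()
	lo = s.commonPrefixLen(0, na, 0, nb)
	suf := s.commonSuffixLen(lo, na, lo, nb)
	return lo, na - suf, nb - suf
}

func main() {
	s := strSeqs{"prefixMIDDLEsuffix", "prefixHEARTSsuffix"}
	lo, ahi, bhi := trim(s)
	fmt.Println(lo, ahi, bhi)             // 6 12 12
	fmt.Println(s.a[lo:ahi], s.b[lo:bhi]) // MIDDLE HEARTS
}
```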
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go
new file mode 100644
index 0000000000..f7aa2b79f6
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go
@@ -0,0 +1,99 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+ "bytes"
+ "unicode/utf8"
+
+ "github.com/golangci/golangci-lint/internal/x/tools/diff/lcs"
+)
+
+// Strings computes the differences between two strings.
+// The resulting edits respect rune boundaries.
+func Strings(before, after string) []Edit {
+ if before == after {
+ return nil // common case
+ }
+
+ if isASCII(before) && isASCII(after) {
+ // TODO(adonovan): opt: specialize diffASCII for strings.
+ return diffASCII([]byte(before), []byte(after))
+ }
+ return diffRunes([]rune(before), []rune(after))
+}
+
+// Bytes computes the differences between two byte slices.
+// The resulting edits respect rune boundaries.
+func Bytes(before, after []byte) []Edit {
+ if bytes.Equal(before, after) {
+ return nil // common case
+ }
+
+ if isASCII(before) && isASCII(after) {
+ return diffASCII(before, after)
+ }
+ return diffRunes(runes(before), runes(after))
+}
+
+func diffASCII(before, after []byte) []Edit {
+ diffs := lcs.DiffBytes(before, after)
+
+ // Convert from LCS diffs.
+ res := make([]Edit, len(diffs))
+ for i, d := range diffs {
+ res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])}
+ }
+ return res
+}
+
+func diffRunes(before, after []rune) []Edit {
+ diffs := lcs.DiffRunes(before, after)
+
+ // The diffs returned by the lcs package use indexes
+ // into whatever slice was passed in.
+ // Convert rune offsets to byte offsets.
+ res := make([]Edit, len(diffs))
+ lastEnd := 0
+ utf8Len := 0
+ for i, d := range diffs {
+ utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits
+ start := utf8Len
+ utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit
+ res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])}
+ lastEnd = d.End
+ }
+ return res
+}
+
+// runes is like []rune(string(bytes)) without the duplicate allocation.
+func runes(bytes []byte) []rune {
+ n := utf8.RuneCount(bytes)
+ runes := make([]rune, n)
+ for i := 0; i < n; i++ {
+ r, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ runes[i] = r
+ }
+ return runes
+}
+
+// runesLen returns the length in bytes of the UTF-8 encoding of runes.
+func runesLen(runes []rune) (len int) {
+ for _, r := range runes {
+ len += utf8.RuneLen(r)
+ }
+ return len
+}
+
+// isASCII reports whether s contains only ASCII.
+func isASCII[S string | []byte](s S) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
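The rune-to-byte conversion in `diffRunes` is the subtle part: the LCS runs over runes, but `Edit.Start`/`Edit.End` are byte offsets into the original text. A minimal illustration of the same accounting (the rune range here is made up):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	before := "héllo" // 'é' is 2 bytes in UTF-8
	runes := []rune(before)

	// Suppose an LCS diff reports an edit at rune range [1,2),
	// i.e. replacing 'é'. The byte offsets differ from the rune ones.
	startRune, endRune := 1, 2

	byteStart := 0
	for _, r := range runes[:startRune] {
		byteStart += utf8.RuneLen(r) // same accounting as runesLen above
	}
	byteEnd := byteStart
	for _, r := range runes[startRune:endRune] {
		byteEnd += utf8.RuneLen(r)
	}
	fmt.Println(byteStart, byteEnd)        // 1 3: the edit covers bytes 1..3, not 1..2
	fmt.Println(before[byteStart:byteEnd]) // é
}
```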
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md
new file mode 100644
index 0000000000..4b97984989
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md
@@ -0,0 +1,8 @@
+# diff
+
+Extracted from `/internal/diff/` (related to `fixer`).
+This is just a copy of the code without any changes.
+
+## History
+
+- sync with https://github.com/golang/tools/blob/v0.28.0
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go
new file mode 100644
index 0000000000..cfbda61020
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go
@@ -0,0 +1,251 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// DefaultContextLines is the number of unchanged lines of surrounding
+// context displayed by Unified. Use ToUnified to specify a different value.
+const DefaultContextLines = 3
+
+// Unified returns a unified diff of the old and new strings.
+// The old and new labels are the names of the old and new files.
+// If the strings are equal, it returns the empty string.
+func Unified(oldLabel, newLabel, old, new string) string {
+ edits := Strings(old, new)
+ unified, err := ToUnified(oldLabel, newLabel, old, edits, DefaultContextLines)
+ if err != nil {
+ // Can't happen: edits are consistent.
+ log.Fatalf("internal error in diff.Unified: %v", err)
+ }
+ return unified
+}
+
+// ToUnified applies the edits to content and returns a unified diff,
+// with contextLines lines of (unchanged) context around each diff hunk.
+// The old and new labels are the names of the content and result files.
+// It returns an error if the edits are inconsistent; see ApplyEdits.
+func ToUnified(oldLabel, newLabel, content string, edits []Edit, contextLines int) (string, error) {
+ u, err := toUnified(oldLabel, newLabel, content, edits, contextLines)
+ if err != nil {
+ return "", err
+ }
+ return u.String(), nil
+}
+
+// unified represents a set of edits as a unified diff.
+type unified struct {
+ // from is the name of the original file.
+ from string
+ // to is the name of the modified file.
+ to string
+ // hunks is the set of edit hunks needed to transform the file content.
+ hunks []*hunk
+}
+
+// hunk represents a contiguous set of line edits to apply.
+type hunk struct {
+ // The line in the original source where the hunk starts.
+ fromLine int
+	// The line in the modified source where the hunk starts.
+ toLine int
+ // The set of line based edits to apply.
+ lines []line
+}
+
+// line represents a single line operation to apply as part of a hunk.
+type line struct {
+ // kind is the type of line this represents, deletion, insertion or copy.
+ kind opKind
+ // content is the content of this line.
+ // For deletion it is the line being removed, for all others it is the line
+ // to put in the output.
+ content string
+}
+
+// opKind is used to denote the type of operation a line represents.
+type opKind int
+
+const (
+ // opDelete is the operation kind for a line that is present in the input
+ // but not in the output.
+ opDelete opKind = iota
+ // opInsert is the operation kind for a line that is new in the output.
+ opInsert
+ // opEqual is the operation kind for a line that is the same in the input and
+ // output, often used to provide context around edited lines.
+ opEqual
+)
+
+// String returns a human-readable representation of an opKind. It is not
+// intended for machine processing.
+func (k opKind) String() string {
+ switch k {
+ case opDelete:
+ return "delete"
+ case opInsert:
+ return "insert"
+ case opEqual:
+ return "equal"
+ default:
+ panic("unknown operation kind")
+ }
+}
+
+// toUnified takes a file contents and a sequence of edits, and calculates
+// a unified diff that represents those edits.
+func toUnified(fromName, toName string, content string, edits []Edit, contextLines int) (unified, error) {
+ gap := contextLines * 2
+ u := unified{
+ from: fromName,
+ to: toName,
+ }
+ if len(edits) == 0 {
+ return u, nil
+ }
+ var err error
+ edits, err = lineEdits(content, edits) // expand to whole lines
+ if err != nil {
+ return u, err
+ }
+ lines := splitLines(content)
+ var h *hunk
+ last := 0
+ toLine := 0
+ for _, edit := range edits {
+ // Compute the zero-based line numbers of the edit start and end.
+ // TODO(adonovan): opt: compute incrementally, avoid O(n^2).
+ start := strings.Count(content[:edit.Start], "\n")
+ end := strings.Count(content[:edit.End], "\n")
+ if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' {
+ end++ // EOF counts as an implicit newline
+ }
+
+ switch {
+ case h != nil && start == last:
+			// direct extension
+ case h != nil && start <= last+gap:
+			// within range of previous lines, add the joiners
+ addEqualLines(h, lines, last, start)
+ default:
+			// need to start a new hunk
+ if h != nil {
+ // add the edge to the previous hunk
+ addEqualLines(h, lines, last, last+contextLines)
+ u.hunks = append(u.hunks, h)
+ }
+ toLine += start - last
+ h = &hunk{
+ fromLine: start + 1,
+ toLine: toLine + 1,
+ }
+ // add the edge to the new hunk
+ delta := addEqualLines(h, lines, start-contextLines, start)
+ h.fromLine -= delta
+ h.toLine -= delta
+ }
+ last = start
+ for i := start; i < end; i++ {
+ h.lines = append(h.lines, line{kind: opDelete, content: lines[i]})
+ last++
+ }
+ if edit.New != "" {
+ for _, content := range splitLines(edit.New) {
+ h.lines = append(h.lines, line{kind: opInsert, content: content})
+ toLine++
+ }
+ }
+ }
+ if h != nil {
+ // add the edge to the final hunk
+ addEqualLines(h, lines, last, last+contextLines)
+ u.hunks = append(u.hunks, h)
+ }
+ return u, nil
+}
+
+func splitLines(text string) []string {
+ lines := strings.SplitAfter(text, "\n")
+ if lines[len(lines)-1] == "" {
+ lines = lines[:len(lines)-1]
+ }
+ return lines
+}
+
+func addEqualLines(h *hunk, lines []string, start, end int) int {
+ delta := 0
+ for i := start; i < end; i++ {
+ if i < 0 {
+ continue
+ }
+ if i >= len(lines) {
+ return delta
+ }
+ h.lines = append(h.lines, line{kind: opEqual, content: lines[i]})
+ delta++
+ }
+ return delta
+}
+
+// String converts a unified diff to the standard textual form for that diff.
+// The output of this function can be passed to tools like patch.
+func (u unified) String() string {
+ if len(u.hunks) == 0 {
+ return ""
+ }
+ b := new(strings.Builder)
+ fmt.Fprintf(b, "--- %s\n", u.from)
+ fmt.Fprintf(b, "+++ %s\n", u.to)
+ for _, hunk := range u.hunks {
+ fromCount, toCount := 0, 0
+ for _, l := range hunk.lines {
+ switch l.kind {
+ case opDelete:
+ fromCount++
+ case opInsert:
+ toCount++
+ default:
+ fromCount++
+ toCount++
+ }
+ }
+ fmt.Fprint(b, "@@")
+ if fromCount > 1 {
+ fmt.Fprintf(b, " -%d,%d", hunk.fromLine, fromCount)
+ } else if hunk.fromLine == 1 && fromCount == 0 {
+			// Match GNU diff -u's odd behavior when adding to an empty file.
+ fmt.Fprintf(b, " -0,0")
+ } else {
+ fmt.Fprintf(b, " -%d", hunk.fromLine)
+ }
+ if toCount > 1 {
+ fmt.Fprintf(b, " +%d,%d", hunk.toLine, toCount)
+ } else if hunk.toLine == 1 && toCount == 0 {
+			// Match GNU diff -u's odd behavior when adding to an empty file.
+ fmt.Fprintf(b, " +0,0")
+ } else {
+ fmt.Fprintf(b, " +%d", hunk.toLine)
+ }
+ fmt.Fprint(b, " @@\n")
+ for _, l := range hunk.lines {
+ switch l.kind {
+ case opDelete:
+ fmt.Fprintf(b, "-%s", l.content)
+ case opInsert:
+ fmt.Fprintf(b, "+%s", l.content)
+ default:
+ fmt.Fprintf(b, " %s", l.content)
+ }
+ if !strings.HasSuffix(l.content, "\n") {
+ fmt.Fprintf(b, "\n\\ No newline at end of file\n")
+ }
+ }
+ }
+ return b.String()
+}
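Putting the pieces together, `Strings` plus `ToUnified` is wrapped by `Unified`; a usage sketch (note the package sits under `internal/`, so this call shape is only available inside golangci-lint itself):

```go
package main

import (
	"fmt"

	"github.com/golangci/golangci-lint/internal/x/tools/diff"
)

func main() {
	before := "one\ntwo\nthree\n"
	after := "one\nTWO\nthree\n"
	fmt.Print(diff.Unified("a.txt", "b.txt", before, after))
	// --- a.txt
	// +++ b.txt
	// @@ -1,3 +1,3 @@
	//  one
	// -two
	// +TWO
	//  three
}
```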
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go
index 89017e9bfc..5a26c75aed 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go
@@ -1,18 +1,20 @@
package commands
import (
+ "context"
+ "encoding/json"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
+ "strconv"
"strings"
"time"
hcversion "github.com/hashicorp/go-version"
"github.com/pelletier/go-toml/v2"
- "github.com/santhosh-tekuri/jsonschema/v5"
- "github.com/santhosh-tekuri/jsonschema/v5/httploader"
+ "github.com/santhosh-tekuri/jsonschema/v6"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/yaml.v3"
@@ -43,9 +45,7 @@ func (c *configCommand) executeVerify(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("[%s] validate: %w", usedConfigFile, err)
}
- detail := v.DetailedOutput()
-
- printValidationDetail(cmd, &detail)
+ printValidationDetail(cmd, v.DetailedOutput())
return errors.New("the configuration contains invalid elements")
}
@@ -100,10 +100,12 @@ func createSchemaURL(flags *pflag.FlagSet, buildInfo BuildInfo) (string, error)
}
func validateConfiguration(schemaPath, targetFile string) error {
- httploader.Client = &http.Client{Timeout: 2 * time.Second}
-
compiler := jsonschema.NewCompiler()
- compiler.Draft = jsonschema.Draft7
+ compiler.UseLoader(jsonschema.SchemeURLLoader{
+ "file": jsonschema.FileLoader{},
+ "https": newJSONSchemaHTTPLoader(),
+ })
+ compiler.DefaultDraft(jsonschema.Draft7)
schema, err := compiler.Compile(schemaPath)
if err != nil {
@@ -133,10 +135,13 @@ func validateConfiguration(schemaPath, targetFile string) error {
return schema.Validate(m)
}
-func printValidationDetail(cmd *cobra.Command, detail *jsonschema.Detailed) {
- if detail.Error != "" {
+func printValidationDetail(cmd *cobra.Command, detail *jsonschema.OutputUnit) {
+ if detail.Error != nil {
+ data, _ := json.Marshal(detail.Error)
+ details, _ := strconv.Unquote(string(data))
+
cmd.PrintErrf("jsonschema: %q does not validate with %q: %s\n",
- strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, detail.Error)
+ strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, details)
}
for _, d := range detail.Errors {
@@ -177,3 +182,33 @@ func decodeTomlFile(filename string) (any, error) {
return m, nil
}
+
+type jsonschemaHTTPLoader struct {
+ *http.Client
+}
+
+func newJSONSchemaHTTPLoader() *jsonschemaHTTPLoader {
+ return &jsonschemaHTTPLoader{Client: &http.Client{
+ Timeout: 2 * time.Second,
+ }}
+}
+
+func (l jsonschemaHTTPLoader) Load(url string) (any, error) {
+ req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := l.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode)
+ }
+
+ return jsonschema.UnmarshalJSON(resp.Body)
+}
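The jsonschema v5-to-v6 migration above replaces the package-global `httploader.Client` with a per-compiler loader and a decoded-document API. A standalone sketch of the same wiring, validating against an inline schema instead of a remote one (`AddResource` and the resource name are conveniences of this sketch, not taken from the hunk):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/santhosh-tekuri/jsonschema/v6"
)

// httpLoader bounds schema downloads with a client-level timeout,
// replacing v5's package-global httploader.Client.
type httpLoader struct{ client *http.Client }

func (l httpLoader) Load(url string) (any, error) {
	resp, err := l.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s: status %d", url, resp.StatusCode)
	}
	return jsonschema.UnmarshalJSON(resp.Body)
}

func main() {
	compiler := jsonschema.NewCompiler()
	compiler.UseLoader(jsonschema.SchemeURLLoader{
		"file":  jsonschema.FileLoader{},
		"https": httpLoader{client: &http.Client{Timeout: 2 * time.Second}},
	})
	compiler.DefaultDraft(jsonschema.Draft7)

	// Compile an inline resource instead of hitting the network.
	doc, err := jsonschema.UnmarshalJSON(strings.NewReader(`{"type":"object"}`))
	if err != nil {
		panic(err)
	}
	if err := compiler.AddResource("inline.json", doc); err != nil {
		panic(err)
	}
	schema, err := compiler.Compile("inline.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(schema.Validate(map[string]any{"run": map[string]any{}})) // <nil>
}
```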
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go
index 608f6b9de5..9f17018b2f 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go
@@ -28,11 +28,11 @@ func setupLintersFlagSet(v *viper.Viper, fs *pflag.FlagSet) {
color.GreenString("Enable only fast linters from enabled linters set (first run won't be fast)"))
internal.AddHackedStringSliceP(fs, "presets", "p",
- color.GreenString(fmt.Sprintf("Enable presets (%s) of linters.\n"+
- "Run 'golangci-lint help linters' to see them.\n"+
+ formatList("Enable presets of linters:", lintersdb.AllPresets(),
+ "Run 'golangci-lint help linters' to see them.",
"This option implies option --disable-all",
- strings.Join(lintersdb.AllPresets(), "|"),
- )))
+ ),
+ )
fs.StringSlice("enable-only", nil,
color.GreenString("Override linters configuration section to only run the specific linter(s)")) // Flags only.
@@ -49,14 +49,13 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) {
internal.AddFlagAndBind(v, fs, fs.String, "go", "run.go", "", color.GreenString("Targeted Go version"))
internal.AddHackedStringSlice(fs, "build-tags", color.GreenString("Build tags"))
- internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work"))
+ internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout,
+ color.GreenString("Timeout for total work. If <= 0, the timeout is disabled"))
internal.AddFlagAndBind(v, fs, fs.Bool, "tests", "run.tests", true, color.GreenString("Analyze tests (*_test.go)"))
internal.AddDeprecatedHackedStringSlice(fs, "skip-files", color.GreenString("Regexps of files to skip"))
internal.AddDeprecatedHackedStringSlice(fs, "skip-dirs", color.GreenString("Regexps of directories to skip"))
- internal.AddDeprecatedFlagAndBind(v, fs, fs.Bool, "skip-dirs-use-default", "run.skip-dirs-use-default", true,
- getDefaultDirectoryExcludeHelp())
const allowParallelDesc = "Allow multiple parallel golangci-lint instances running.\n" +
"If false (default) - golangci-lint acquires file lock on start."
@@ -69,13 +68,11 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) {
func setupOutputFlagSet(v *viper.Viper, fs *pflag.FlagSet) {
internal.AddFlagAndBind(v, fs, fs.String, "out-format", "output.formats", config.OutFormatColoredLineNumber,
- color.GreenString(fmt.Sprintf("Formats of output: %s", strings.Join(config.AllOutputFormats, "|"))))
+ formatList("Formats of output:", config.AllOutputFormats))
internal.AddFlagAndBind(v, fs, fs.Bool, "print-issued-lines", "output.print-issued-lines", true,
color.GreenString("Print lines of code with issue"))
internal.AddFlagAndBind(v, fs, fs.Bool, "print-linter-name", "output.print-linter-name", true,
color.GreenString("Print linter name in issue line"))
- internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "output.uniq-by-line", true,
- color.GreenString("Make issues output unique by line"))
internal.AddFlagAndBind(v, fs, fs.Bool, "sort-results", "output.sort-results", false,
color.GreenString("Sort linter results"))
internal.AddFlagAndBind(v, fs, fs.StringSlice, "sort-order", "output.sort-order", nil,
@@ -97,11 +94,13 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) {
color.GreenString("Maximum issues count per one linter. Set to 0 to disable"))
internal.AddFlagAndBind(v, fs, fs.Int, "max-same-issues", "issues.max-same-issues", 3,
color.GreenString("Maximum count of issues with the same text. Set to 0 to disable"))
+ internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "issues.uniq-by-line", true,
+ color.GreenString("Make issues output unique by line"))
internal.AddHackedStringSlice(fs, "exclude-files", color.GreenString("Regexps of files to exclude"))
internal.AddHackedStringSlice(fs, "exclude-dirs", color.GreenString("Regexps of directories to exclude"))
internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-dirs-use-default", "issues.exclude-dirs-use-default", true,
- getDefaultDirectoryExcludeHelp())
+ formatList("Use or not use default excluded directories:", processors.StdExcludeDirRegexps))
internal.AddFlagAndBind(v, fs, fs.String, "exclude-generated", "issues.exclude-generated", processors.AutogeneratedModeLax,
color.GreenString("Mode of the generated files analysis"))
@@ -123,6 +122,23 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) {
color.GreenString("Fix found issues (if it's supported by the linter)"))
}
+func formatList(head string, items []string, foot ...string) string {
+ parts := []string{color.GreenString(head)}
+ for _, p := range items {
+ parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(p)))
+ }
+
+ for _, s := range foot {
+ parts = append(parts, color.GreenString(s))
+ }
+
+ if len(foot) == 0 {
+ parts = append(parts, "")
+ }
+
+ return strings.Join(parts, "\n")
+}
+
func getDefaultIssueExcludeHelp() string {
parts := []string{color.GreenString("Use or not use default excludes:")}
@@ -135,12 +151,3 @@ func getDefaultIssueExcludeHelp() string {
return strings.Join(parts, "\n")
}
-
-func getDefaultDirectoryExcludeHelp() string {
- parts := []string{color.GreenString("Use or not use default excluded directories:")}
- for _, dir := range processors.StdExcludeDirRegexps {
- parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir)))
- }
- parts = append(parts, "")
- return strings.Join(parts, "\n")
-}
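`formatList` replaces two near-duplicate help builders (`getDefaultDirectoryExcludeHelp` is deleted in this same hunk). A color-free sketch showing why the trailing empty element is appended when there is no footer: it yields a blank line that separates the list from whatever cobra prints next:

```go
package main

import (
	"fmt"
	"strings"
)

// formatList mirrors the helper above, minus the color calls.
func formatList(head string, items []string, foot ...string) string {
	parts := []string{head}
	for _, p := range items {
		parts = append(parts, fmt.Sprintf(" - %s", p))
	}
	parts = append(parts, foot...)
	if len(foot) == 0 {
		parts = append(parts, "") // trailing newline separates the list from following text
	}
	return strings.Join(parts, "\n")
}

func main() {
	fmt.Print(formatList("Formats of output:", []string{"json", "tab", "checkstyle"}))
}
```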
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go
index 094e5d1905..02a586f4c1 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go
@@ -1,10 +1,13 @@
package commands
import (
+ "encoding/json"
"fmt"
"slices"
"sort"
"strings"
+ "unicode"
+ "unicode/utf8"
"github.com/fatih/color"
"github.com/spf13/cobra"
@@ -15,9 +18,27 @@ import (
"github.com/golangci/golangci-lint/pkg/logutils"
)
+type linterHelp struct {
+ Name string `json:"name"`
+ Desc string `json:"description"`
+ Fast bool `json:"fast"`
+ AutoFix bool `json:"autoFix"`
+ Presets []string `json:"presets"`
+ EnabledByDefault bool `json:"enabledByDefault"`
+ Deprecated bool `json:"deprecated"`
+ Since string `json:"since"`
+ OriginalURL string `json:"originalURL,omitempty"`
+}
+
+type helpOptions struct {
+ JSON bool
+}
+
type helpCommand struct {
cmd *cobra.Command
+ opts helpOptions
+
dbManager *lintersdb.Manager
log logutils.Log
@@ -35,16 +56,21 @@ func newHelpCommand(logger logutils.Log) *helpCommand {
},
}
- helpCmd.AddCommand(
- &cobra.Command{
- Use: "linters",
- Short: "Help about linters",
- Args: cobra.NoArgs,
- ValidArgsFunction: cobra.NoFileCompletions,
- Run: c.execute,
- PreRunE: c.preRunE,
- },
- )
+ lintersCmd := &cobra.Command{
+ Use: "linters",
+ Short: "Help about linters",
+ Args: cobra.NoArgs,
+ ValidArgsFunction: cobra.NoFileCompletions,
+ RunE: c.execute,
+ PreRunE: c.preRunE,
+ }
+
+ helpCmd.AddCommand(lintersCmd)
+
+ fs := lintersCmd.Flags()
+	fs.SortFlags = false // keep the flags in the order they are defined here
+
+ fs.BoolVar(&c.opts.JSON, "json", false, color.GreenString("Display as JSON"))
c.cmd = helpCmd
@@ -64,7 +90,41 @@ func (c *helpCommand) preRunE(_ *cobra.Command, _ []string) error {
return nil
}
-func (c *helpCommand) execute(_ *cobra.Command, _ []string) {
+func (c *helpCommand) execute(_ *cobra.Command, _ []string) error {
+ if c.opts.JSON {
+ return c.printJSON()
+ }
+
+ c.print()
+
+ return nil
+}
+
+func (c *helpCommand) printJSON() error {
+ var linters []linterHelp
+
+ for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() {
+ if lc.Internal {
+ continue
+ }
+
+ linters = append(linters, linterHelp{
+ Name: lc.Name(),
+ Desc: formatDescription(lc.Linter.Desc()),
+ Fast: !lc.IsSlowLinter(),
+ AutoFix: lc.CanAutoFix,
+ Presets: lc.InPresets,
+ EnabledByDefault: lc.EnabledByDefault,
+ Deprecated: lc.IsDeprecated(),
+ Since: lc.Since,
+ OriginalURL: lc.OriginalURL,
+ })
+ }
+
+ return json.NewEncoder(c.cmd.OutOrStdout()).Encode(linters)
+}
+
+func (c *helpCommand) print() {
var enabledLCs, disabledLCs []*linter.Config
for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() {
if lc.Internal {
@@ -124,19 +184,46 @@ func printLinters(lcs []*linter.Config) {
})
for _, lc := range lcs {
- // If the linter description spans multiple lines, truncate everything following the first newline
- linterDescription := lc.Linter.Desc()
- firstNewline := strings.IndexRune(linterDescription, '\n')
- if firstNewline > 0 {
- linterDescription = linterDescription[:firstNewline]
- }
+ desc := formatDescription(lc.Linter.Desc())
deprecatedMark := ""
if lc.IsDeprecated() {
deprecatedMark = " [" + color.RedString("deprecated") + "]"
}
- _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n",
- color.YellowString(lc.Name()), deprecatedMark, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix)
+ var capabilities []string
+ if !lc.IsSlowLinter() {
+ capabilities = append(capabilities, color.BlueString("fast"))
+ }
+ if lc.CanAutoFix {
+ capabilities = append(capabilities, color.GreenString("auto-fix"))
+ }
+
+ var capability string
+ if capabilities != nil {
+ capability = " [" + strings.Join(capabilities, ", ") + "]"
+ }
+
+ _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s%s\n",
+ color.YellowString(lc.Name()), deprecatedMark, desc, capability)
+ }
+}
+
+func formatDescription(desc string) string {
+ // If the linter description spans multiple lines, truncate everything following the first newline
+ endFirstLine := strings.IndexRune(desc, '\n')
+ if endFirstLine > 0 {
+ desc = desc[:endFirstLine]
}
+
+ rawDesc := []rune(desc)
+
+ r, _ := utf8.DecodeRuneInString(desc)
+ rawDesc[0] = unicode.ToUpper(r)
+
+ if rawDesc[len(rawDesc)-1] != '.' {
+ rawDesc = append(rawDesc, '.')
+ }
+
+ return string(rawDesc)
}
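`formatDescription` is now shared by the text and JSON outputs. Its three normalization rules (keep the first line, capitalize the first rune, ensure a trailing period) can be checked standalone; this copy assumes, like the original, a non-empty description:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

func formatDescription(desc string) string {
	if i := strings.IndexRune(desc, '\n'); i > 0 {
		desc = desc[:i] // keep only the first line
	}
	rawDesc := []rune(desc) // assumes desc is non-empty, like the original
	r, _ := utf8.DecodeRuneInString(desc)
	rawDesc[0] = unicode.ToUpper(r)
	if rawDesc[len(rawDesc)-1] != '.' {
		rawDesc = append(rawDesc, '.')
	}
	return string(rawDesc)
}

func main() {
	fmt.Println(formatDescription("checks whether code uses context correctly\nsecond line"))
	// Checks whether code uses context correctly.
}
```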
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
index ff7c5e467b..d9aa7578cd 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
@@ -238,14 +238,21 @@ func (c *runCommand) execute(_ *cobra.Command, args []string) {
needTrackResources := logutils.IsVerbose() || c.opts.PrintResourcesUsage
trackResourcesEndCh := make(chan struct{})
- defer func() { // XXX: this defer must be before ctx.cancel defer
- if needTrackResources { // wait until resource tracking finished to print properly
+
+ // Note: this defer must be before ctx.cancel defer
+ defer func() {
+ // wait until resource tracking finished to print properly
+ if needTrackResources {
<-trackResourcesEndCh
}
}()
- ctx, cancel := context.WithTimeout(context.Background(), c.cfg.Run.Timeout)
- defer cancel()
+ ctx := context.Background()
+ if c.cfg.Run.Timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, c.cfg.Run.Timeout)
+ defer cancel()
+ }
if needTrackResources {
go watchResources(ctx, trackResourcesEndCh, c.log, c.debugf)
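The run.go change makes a non-positive `run.timeout` mean "no deadline" rather than an immediately-expired context. The pattern in isolation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// contextFor mirrors the new behavior: a positive timeout bounds the
// run, anything else leaves the context without a deadline.
func contextFor(timeout time.Duration) (context.Context, context.CancelFunc) {
	if timeout > 0 {
		return context.WithTimeout(context.Background(), timeout)
	}
	return context.WithCancel(context.Background())
}

func main() {
	ctx, cancel := contextFor(0)
	defer cancel()
	_, hasDeadline := ctx.Deadline()
	fmt.Println(hasDeadline) // false: a non-positive timeout disables the deadline
}
```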
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
index b863b329f9..579eddf594 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
@@ -1,11 +1,16 @@
package config
import (
+ "cmp"
+ "fmt"
"os"
+ "path/filepath"
+ "slices"
"strings"
hcversion "github.com/hashicorp/go-version"
- "github.com/ldez/gomoddirectives"
+ "github.com/ldez/grignotin/gomod"
+ "golang.org/x/mod/modfile"
)
// Config encapsulates the config data specified in the golangci-lint YAML config file.
@@ -80,12 +85,7 @@ func detectGoVersion() string {
return goVersion
}
- v := os.Getenv("GOVERSION")
- if v != "" {
- return v
- }
-
- return "1.17"
+ return cmp.Or(os.Getenv("GOVERSION"), "1.17")
}
// detectGoVersionFromGoMod tries to get Go version from go.mod.
@@ -93,8 +93,16 @@ func detectGoVersion() string {
// else it returns `go` version if present,
// else it returns empty.
func detectGoVersionFromGoMod() string {
- file, _ := gomoddirectives.GetModuleFile()
- if file == nil {
+ modPath, err := gomod.GetGoModPath()
+ if err != nil {
+ modPath = detectGoModFallback()
+ if modPath == "" {
+ return ""
+ }
+ }
+
+ file, err := parseGoMod(modPath)
+ if err != nil {
return ""
}
@@ -110,3 +118,41 @@ func detectGoVersionFromGoMod() string {
return ""
}
+
+func parseGoMod(goMod string) (*modfile.File, error) {
+ raw, err := os.ReadFile(filepath.Clean(goMod))
+ if err != nil {
+ return nil, fmt.Errorf("reading go.mod file: %w", err)
+ }
+
+ return modfile.Parse("go.mod", raw, nil)
+}
+
+func detectGoModFallback() string {
+ info, err := gomod.GetModuleInfo()
+ if err != nil {
+ return ""
+ }
+
+ wd, err := os.Getwd()
+ if err != nil {
+ return ""
+ }
+
+ slices.SortFunc(info, func(a, b gomod.ModInfo) int {
+ return cmp.Compare(len(b.Path), len(a.Path))
+ })
+
+ goMod := info[0]
+ for _, m := range info {
+ if !strings.HasPrefix(wd, m.Dir) {
+ continue
+ }
+
+ goMod = m
+
+ break
+ }
+
+ return goMod.GoMod
+}
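The new go-version detection reads go.mod directly through `golang.org/x/mod/modfile` (via `parseGoMod` above). The parse step on its own, with inline content standing in for a real file:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	raw := []byte("module example.com/demo\n\ngo 1.22.1\n")
	file, err := modfile.Parse("go.mod", raw, nil)
	if err != nil {
		panic(err)
	}
	if file.Go != nil {
		fmt.Println(file.Go.Version) // 1.22.1
	}
}
```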
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go
index 2ee9364aaa..081b87624d 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go
@@ -117,8 +117,9 @@ type Issues struct {
UseDefaultExcludeDirs bool `mapstructure:"exclude-dirs-use-default"`
- MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"`
- MaxSameIssues int `mapstructure:"max-same-issues"`
+ MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"`
+ MaxSameIssues int `mapstructure:"max-same-issues"`
+ UniqByLine bool `mapstructure:"uniq-by-line"`
DiffFromRevision string `mapstructure:"new-from-rev"`
DiffPatchFilePath string `mapstructure:"new-from-patch"`
@@ -127,7 +128,7 @@ type Issues struct {
NeedFix bool `mapstructure:"fix"`
- ExcludeGeneratedStrict bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead.
+ ExcludeGeneratedStrict *bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead.
}
func (i *Issues) Validate() error {
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
index b182d1e0f1..8e6c184ca4 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
@@ -105,6 +105,7 @@ var defaultLintersSettings = LintersSettings{
Kitlog: true,
Klog: true,
Logr: true,
+ Slog: true,
Zap: true,
RequireStringKey: false,
NoPrintfLike: false,
@@ -168,7 +169,6 @@ var defaultLintersSettings = LintersSettings{
Unused: UnusedSettings{
FieldWritesAreUses: true,
PostStatementsAreReads: false,
- ExportedIsUsed: true,
ExportedFieldsAreUsed: true,
ParametersAreUsed: true,
LocalVariablesAreUsed: true,
@@ -178,6 +178,15 @@ var defaultLintersSettings = LintersSettings{
HTTPMethod: true,
HTTPStatusCode: true,
},
+ UseTesting: UseTestingSettings{
+ ContextBackground: true,
+ ContextTodo: true,
+ OSChdir: true,
+ OSMkdirTemp: true,
+ OSSetenv: false,
+ OSTempDir: false,
+ OSCreateTemp: true,
+ },
Varnamelen: VarnamelenSettings{
MaxDistance: 5,
MinNameLength: 3,
@@ -261,6 +270,7 @@ type LintersSettings struct {
Promlinter PromlinterSettings
ProtoGetter ProtoGetterSettings
Reassign ReassignSettings
+ Recvcheck RecvcheckSettings
Revive ReviveSettings
RowsErrCheck RowsErrCheckSettings
SlogLint SlogLintSettings
@@ -277,6 +287,7 @@ type LintersSettings struct {
Unparam UnparamSettings
Unused UnusedSettings
UseStdlibVars UseStdlibVarsSettings
+ UseTesting UseTestingSettings
Varnamelen VarnamelenSettings
Whitespace WhitespaceSettings
Wrapcheck WrapcheckSettings
@@ -318,8 +329,10 @@ type BiDiChkSettings struct {
}
type CopyLoopVarSettings struct {
- IgnoreAlias bool `mapstructure:"ignore-alias"` // Deprecated: use CheckAlias
- CheckAlias bool `mapstructure:"check-alias"`
+ CheckAlias bool `mapstructure:"check-alias"`
+
+ // Deprecated: use CheckAlias
+ IgnoreAlias *bool `mapstructure:"ignore-alias"`
}
type Cyclop struct {
@@ -466,10 +479,12 @@ type FunlenSettings struct {
}
type GciSettings struct {
- Sections []string `mapstructure:"sections"`
- SkipGenerated bool `mapstructure:"skip-generated"`
- CustomOrder bool `mapstructure:"custom-order"`
- NoLexOrder bool `mapstructure:"no-lex-order"`
+ Sections []string `mapstructure:"sections"`
+ NoInlineComments bool `mapstructure:"no-inline-comments"`
+ NoPrefixComments bool `mapstructure:"no-prefix-comments"`
+ SkipGenerated bool `mapstructure:"skip-generated"`
+ CustomOrder bool `mapstructure:"custom-order"`
+ NoLexOrder bool `mapstructure:"no-lex-order"`
// Deprecated: use Sections instead.
LocalPrefixes string `mapstructure:"local-prefixes"`
@@ -492,6 +507,7 @@ type GinkgoLinterSettings struct {
type GoChecksumTypeSettings struct {
DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"`
+ IncludeSharedInterfaces bool `mapstructure:"include-shared-interfaces"`
}
type GocognitSettings struct {
@@ -534,7 +550,7 @@ type GodotSettings struct {
Period bool `mapstructure:"period"`
// Deprecated: use Scope instead
- CheckAll bool `mapstructure:"check-all"`
+ CheckAll *bool `mapstructure:"check-all"`
}
type GodoxSettings struct {
@@ -574,6 +590,11 @@ type GoModDirectivesSettings struct {
ReplaceLocal bool `mapstructure:"replace-local"`
ExcludeForbidden bool `mapstructure:"exclude-forbidden"`
RetractAllowNoExplanation bool `mapstructure:"retract-allow-no-explanation"`
+ ToolchainForbidden bool `mapstructure:"toolchain-forbidden"`
+ ToolchainPattern string `mapstructure:"toolchain-pattern"`
+ ToolForbidden bool `mapstructure:"tool-forbidden"`
+ GoDebugForbidden bool `mapstructure:"go-debug-forbidden"`
+ GoVersionPattern string `mapstructure:"go-version-pattern"`
}
type GoModGuardSettings struct {
@@ -622,7 +643,7 @@ type GovetSettings struct {
Settings map[string]map[string]any
// Deprecated: the linter should be enabled inside Enable.
- CheckShadowing bool `mapstructure:"check-shadowing"`
+ CheckShadowing *bool `mapstructure:"check-shadowing"`
}
func (cfg *GovetSettings) Validate() error {
@@ -687,6 +708,7 @@ type LoggerCheckSettings struct {
Kitlog bool `mapstructure:"kitlog"`
Klog bool `mapstructure:"klog"`
Logr bool `mapstructure:"logr"`
+ Slog bool `mapstructure:"slog"`
Zap bool `mapstructure:"zap"`
RequireStringKey bool `mapstructure:"require-string-key"`
NoPrintfLike bool `mapstructure:"no-printf-like"`
@@ -798,6 +820,11 @@ type ReassignSettings struct {
Patterns []string `mapstructure:"patterns"`
}
+type RecvcheckSettings struct {
+ DisableBuiltin bool `mapstructure:"disable-builtin"`
+ Exclusions []string `mapstructure:"exclusions"`
+}
+
type ReviveSettings struct {
Go string `mapstructure:"-"`
MaxOpenFiles int `mapstructure:"max-open-files"`
@@ -837,7 +864,7 @@ type SlogLintSettings struct {
ArgsOnSepLines bool `mapstructure:"args-on-sep-lines"`
// Deprecated: use Context instead.
- ContextOnly bool `mapstructure:"context-only"`
+ ContextOnly *bool `mapstructure:"context-only"`
}
type SpancheckSettings struct {
@@ -868,10 +895,31 @@ type TagAlignSettings struct {
}
type TagliatelleSettings struct {
- Case struct {
- Rules map[string]string
- UseFieldName bool `mapstructure:"use-field-name"`
- }
+ Case TagliatelleCase
+}
+
+type TagliatelleCase struct {
+ TagliatelleBase `mapstructure:",squash"`
+ Overrides []TagliatelleOverrides
+}
+
+type TagliatelleOverrides struct {
+ TagliatelleBase `mapstructure:",squash"`
+ Package string `mapstructure:"pkg"`
+ Ignore bool `mapstructure:"ignore"`
+}
+
+type TagliatelleBase struct {
+ Rules map[string]string `mapstructure:"rules"`
+ ExtendedRules map[string]TagliatelleExtendedRule `mapstructure:"extended-rules"`
+ UseFieldName bool `mapstructure:"use-field-name"`
+ IgnoredFields []string `mapstructure:"ignored-fields"`
+}
+
+type TagliatelleExtendedRule struct {
+ Case string
+ ExtraInitialisms bool
+ InitialismOverrides map[string]bool
}
type TestifylintSettings struct {
@@ -936,11 +984,24 @@ type UseStdlibVarsSettings struct {
TimeLayout bool `mapstructure:"time-layout"`
CryptoHash bool `mapstructure:"crypto-hash"`
DefaultRPCPath bool `mapstructure:"default-rpc-path"`
- OSDevNull bool `mapstructure:"os-dev-null"` // Deprecated
SQLIsolationLevel bool `mapstructure:"sql-isolation-level"`
TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"`
ConstantKind bool `mapstructure:"constant-kind"`
- SyslogPriority bool `mapstructure:"syslog-priority"` // Deprecated
+
+ // Deprecated
+ OSDevNull *bool `mapstructure:"os-dev-null"`
+ // Deprecated
+ SyslogPriority *bool `mapstructure:"syslog-priority"`
+}
+
+type UseTestingSettings struct {
+ ContextBackground bool `mapstructure:"context-background"`
+ ContextTodo bool `mapstructure:"context-todo"`
+ OSChdir bool `mapstructure:"os-chdir"`
+ OSMkdirTemp bool `mapstructure:"os-mkdir-temp"`
+ OSSetenv bool `mapstructure:"os-setenv"`
+ OSTempDir bool `mapstructure:"os-temp-dir"`
+ OSCreateTemp bool `mapstructure:"os-create-temp"`
}
type UnconvertSettings struct {
@@ -956,11 +1017,13 @@ type UnparamSettings struct {
type UnusedSettings struct {
FieldWritesAreUses bool `mapstructure:"field-writes-are-uses"`
PostStatementsAreReads bool `mapstructure:"post-statements-are-reads"`
- ExportedIsUsed bool `mapstructure:"exported-is-used"` // Deprecated
ExportedFieldsAreUsed bool `mapstructure:"exported-fields-are-used"`
ParametersAreUsed bool `mapstructure:"parameters-are-used"`
LocalVariablesAreUsed bool `mapstructure:"local-variables-are-used"`
GeneratedIsUsed bool `mapstructure:"generated-is-used"`
+
+ // Deprecated
+ ExportedIsUsed *bool `mapstructure:"exported-is-used"`
}
type VarnamelenSettings struct {
@@ -982,6 +1045,7 @@ type WhitespaceSettings struct {
}
type WrapcheckSettings struct {
+ ExtraIgnoreSigs []string `mapstructure:"extra-ignore-sigs"`
// TODO(ldez): v2 the options must be renamed to use hyphen.
IgnoreSigs []string `mapstructure:"ignoreSigs"`
IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps"`
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
index efeed3ca4d..56f57d9d5d 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
@@ -1,6 +1,7 @@
package config
import (
+ "cmp"
"errors"
"fmt"
"os"
@@ -292,9 +293,7 @@ func (l *Loader) handleGoVersion() {
l.cfg.LintersSettings.ParallelTest.Go = l.cfg.Run.Go
- if l.cfg.LintersSettings.Gofumpt.LangVersion == "" {
- l.cfg.LintersSettings.Gofumpt.LangVersion = l.cfg.Run.Go
- }
+ l.cfg.LintersSettings.Gofumpt.LangVersion = cmp.Or(l.cfg.LintersSettings.Gofumpt.LangVersion, l.cfg.Run.Go)
trimmedGoVersion := goutil.TrimGoVersion(l.cfg.Run.Go)
@@ -322,19 +321,23 @@ func (l *Loader) handleDeprecation() error {
l.cfg.Issues.ExcludeDirs = l.cfg.Run.SkipDirs
}
- // The 2 options are true by default.
// Deprecated since v1.57.0
- if !l.cfg.Run.UseDefaultSkipDirs {
+ if l.cfg.Run.UseDefaultSkipDirs != nil {
l.log.Warnf("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.")
+ l.cfg.Issues.UseDefaultExcludeDirs = *l.cfg.Run.UseDefaultSkipDirs
}
- l.cfg.Issues.UseDefaultExcludeDirs = l.cfg.Run.UseDefaultSkipDirs && l.cfg.Issues.UseDefaultExcludeDirs
- // The 2 options are false by default.
// Deprecated since v1.57.0
- if l.cfg.Run.ShowStats {
+ if l.cfg.Run.ShowStats != nil {
l.log.Warnf("The configuration option `run.show-stats` is deprecated, please use `output.show-stats`")
+ l.cfg.Output.ShowStats = *l.cfg.Run.ShowStats
+ }
+
+ // Deprecated since v1.63.0
+ if l.cfg.Output.UniqByLine != nil {
+ l.log.Warnf("The configuration option `output.uniq-by-line` is deprecated, please use `issues.uniq-by-line`")
+ l.cfg.Issues.UniqByLine = *l.cfg.Output.UniqByLine
}
- l.cfg.Output.ShowStats = l.cfg.Run.ShowStats || l.cfg.Output.ShowStats
// Deprecated since v1.57.0
if l.cfg.Output.Format != "" {
@@ -357,9 +360,11 @@ func (l *Loader) handleDeprecation() error {
}
// Deprecated since v1.59.0
- if l.cfg.Issues.ExcludeGeneratedStrict {
+ if l.cfg.Issues.ExcludeGeneratedStrict != nil {
l.log.Warnf("The configuration option `issues.exclude-generated-strict` is deprecated, please use `issues.exclude-generated`")
- l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies.
+ if !*l.cfg.Issues.ExcludeGeneratedStrict {
+ l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies.
+ }
}
l.handleLinterOptionDeprecations()
@@ -367,16 +372,15 @@ func (l *Loader) handleDeprecation() error {
return nil
}
-//nolint:gocyclo // the complexity cannot be reduced.
func (l *Loader) handleLinterOptionDeprecations() {
// Deprecated since v1.57.0,
// but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697).
- if l.cfg.LintersSettings.Govet.CheckShadowing {
+ if l.cfg.LintersSettings.Govet.CheckShadowing != nil {
l.log.Warnf("The configuration option `linters.govet.check-shadowing` is deprecated. " +
"Please enable `shadow` instead, if you are not using `enable-all`.")
}
- if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias {
+ if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias != nil {
l.log.Warnf("The configuration option `linters.copyloopvar.ignore-alias` is deprecated and ignored," +
"please use `linters.copyloopvar.check-alias`.")
}
@@ -398,7 +402,7 @@ func (l *Loader) handleLinterOptionDeprecations() {
}
// Deprecated since v1.33.0.
- if l.cfg.LintersSettings.Godot.CheckAll {
+ if l.cfg.LintersSettings.Godot.CheckAll != nil {
l.log.Warnf("The configuration option `linters.godot.check-all` is deprecated, please use `linters.godot.scope: all`.")
}
@@ -423,25 +427,23 @@ func (l *Loader) handleLinterOptionDeprecations() {
}
// Deprecated since v1.60.0
- if !l.cfg.LintersSettings.Unused.ExportedIsUsed {
+ if l.cfg.LintersSettings.Unused.ExportedIsUsed != nil {
l.log.Warnf("The configuration option `linters.unused.exported-is-used` is deprecated.")
}
// Deprecated since v1.58.0
- if l.cfg.LintersSettings.SlogLint.ContextOnly {
+ if l.cfg.LintersSettings.SlogLint.ContextOnly != nil {
l.log.Warnf("The configuration option `linters.sloglint.context-only` is deprecated, please use `linters.sloglint.context`.")
- if l.cfg.LintersSettings.SlogLint.Context == "" {
- l.cfg.LintersSettings.SlogLint.Context = "all"
- }
+ l.cfg.LintersSettings.SlogLint.Context = cmp.Or(l.cfg.LintersSettings.SlogLint.Context, "all")
}
// Deprecated since v1.51.0
- if l.cfg.LintersSettings.UseStdlibVars.OSDevNull {
+ if l.cfg.LintersSettings.UseStdlibVars.OSDevNull != nil {
l.log.Warnf("The configuration option `linters.usestdlibvars.os-dev-null` is deprecated.")
}
// Deprecated since v1.51.0
- if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority {
+ if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority != nil {
l.log.Warnf("The configuration option `linters.usestdlibvars.syslog-priority` is deprecated.")
}
}
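Most of this hunk is one mechanical change: each deprecated option flips from `bool` to `*bool`, so an explicitly-set value can be told apart from the zero value, and the deprecation warning fires only when the user actually wrote the key. The pattern in miniature (field and key names are illustrative):

```go
package main

import "fmt"

type runConfig struct {
	// Deprecated: a *bool distinguishes "absent" (nil) from "set to false".
	ShowStats *bool
}

type outputConfig struct {
	ShowStats bool
}

func migrate(run runConfig, out *outputConfig) {
	if run.ShowStats != nil { // warn and copy only if the user set the old key
		fmt.Println("warning: `run.show-stats` is deprecated, use `output.show-stats`")
		out.ShowStats = *run.ShowStats
	}
}

func main() {
	var out outputConfig
	migrate(runConfig{}, &out) // key absent: no warning, nothing copied
	f := false
	migrate(runConfig{ShowStats: &f}, &out) // warns, and the explicit false is preserved
	fmt.Println(out.ShowStats)              // false
}
```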
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go
index 6a26d5773e..aaa5183ec4 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go
@@ -43,7 +43,6 @@ type Output struct {
Formats OutputFormats `mapstructure:"formats"`
PrintIssuedLine bool `mapstructure:"print-issued-lines"`
PrintLinterName bool `mapstructure:"print-linter-name"`
- UniqByLine bool `mapstructure:"uniq-by-line"`
SortResults bool `mapstructure:"sort-results"`
SortOrder []string `mapstructure:"sort-order"`
PathPrefix string `mapstructure:"path-prefix"`
@@ -51,6 +50,9 @@ type Output struct {
// Deprecated: use Formats instead.
Format string `mapstructure:"format"`
+
+ // Deprecated: use [Issues.UniqByLine] instead.
+ UniqByLine *bool `mapstructure:"uniq-by-line"`
}
func (o *Output) Validate() error {
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go
index 2f6523c0b9..784e8c2fad 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go
@@ -29,10 +29,10 @@ type Run struct {
// Deprecated: use Issues.ExcludeDirs instead.
SkipDirs []string `mapstructure:"skip-dirs"`
// Deprecated: use Issues.UseDefaultExcludeDirs instead.
- UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"`
+ UseDefaultSkipDirs *bool `mapstructure:"skip-dirs-use-default"`
// Deprecated: use Output.ShowStats instead.
- ShowStats bool `mapstructure:"show-stats"`
+ ShowStats *bool `mapstructure:"show-stats"`
}
func (r *Run) Validate() error {
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go
index 80bb9c5b44..91d12e5cd3 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go
@@ -61,7 +61,7 @@ func EvalSymlinks(path string) (string, error) {
}
var er evalSymlinkRes
- er.path, er.err = filepath.EvalSymlinks(path)
+ er.path, er.err = evalSymlinks(path)
evalSymlinkCache.Store(path, er)
return er.path, er.err
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go
new file mode 100644
index 0000000000..68e762cf4b
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go
@@ -0,0 +1,9 @@
+//go:build !windows
+
+package fsutils
+
+import "path/filepath"
+
+func evalSymlinks(path string) (string, error) {
+ return filepath.EvalSymlinks(path)
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go
new file mode 100644
index 0000000000..19efb1cfc2
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go
@@ -0,0 +1,39 @@
+//go:build windows
+
+package fsutils
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// This is a workaround for the behavior of [filepath.EvalSymlinks],
+// which fails with [syscall.ENOTDIR] if the specified path contains a junction on Windows.
+// Junctions can occur, for example, when a volume is mounted as a subdirectory inside another drive.
+// This can usually happen when using the Dev Drives feature and replacing existing directories.
+// See: https://github.com/golang/go/issues/40180
+//
+// Since [syscall.ENOTDIR] is only returned when calling [filepath.EvalSymlinks] on Windows
+// if part of the presented path is a junction and nothing before was a symlink,
+// we simply treat this as NOT a symlink,
+// because a symlink over a junction makes no sense at all.
+func evalSymlinks(path string) (string, error) {
+ resolved, err := filepath.EvalSymlinks(path)
+ if err == nil {
+ return resolved, nil
+ }
+
+ if !errors.Is(err, syscall.ENOTDIR) {
+ return "", err
+ }
+
+ _, err = os.Stat(path)
+ if err != nil {
+ return "", err
+ }
+
+	// If the path exists, we make it absolute, to be safe.
+ return filepath.Abs(path)
+}
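The build-tag split keeps the junction workaround out of the non-Windows path. A small usage sketch of the underlying resolution that the cached `EvalSymlinks` wrapper builds on (platform-dependent: it skips itself where symlinks cannot be created):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "real")
	if err := os.Mkdir(target, 0o755); err != nil {
		panic(err)
	}
	link := filepath.Join(dir, "link")
	if err := os.Symlink(target, link); err != nil {
		// e.g. Windows without the required privilege
		fmt.Println("symlinks unsupported here:", err)
		return
	}

	resolved, err := filepath.EvalSymlinks(link)
	if err != nil {
		panic(err)
	}
	fmt.Println(filepath.Base(resolved)) // real
}
```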
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go
index 15d8dd2b33..854e7d15f0 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go
@@ -26,7 +26,7 @@ type EncodingIssue struct {
Severity string
Pos token.Position
LineRange *result.Range
- Replacement *result.Replacement
+ SuggestedFixes []analysis.SuggestedFix
ExpectNoLint bool
ExpectedNoLintLinter string
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go
new file mode 100644
index 0000000000..28441b341a
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go
@@ -0,0 +1,50 @@
+package goanalysis
+
+import (
+ "go/ast"
+ "go/token"
+ "path/filepath"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+func GetGoFilePosition(pass *analysis.Pass, f *ast.File) (token.Position, bool) {
+ position := GetFilePositionFor(pass.Fset, f.Pos())
+
+ if filepath.Ext(position.Filename) == ".go" {
+ return position, true
+ }
+
+ return position, false
+}
+
+func GetFilePositionFor(fset *token.FileSet, p token.Pos) token.Position {
+ pos := fset.PositionFor(p, true)
+
+ ext := filepath.Ext(pos.Filename)
+ if ext != ".go" {
+ // position has been adjusted to a non-go file, revert to original file
+ return fset.PositionFor(p, false)
+ }
+
+ return pos
+}
+
+func EndOfLinePos(f *token.File, line int) token.Pos {
+ var end token.Pos
+
+ if line >= f.LineCount() {
+ // missing newline at the end of the file
+ end = f.Pos(f.Size())
+ } else {
+ end = f.LineStart(line+1) - token.Pos(1)
+ }
+
+ return end
+}
+
+// AdjustPos is a hack to get the right line to display.
+// It should not be used outside some specific cases.
+func AdjustPos(line, nonAdjLine, adjLine int) int {
+ return line + nonAdjLine - adjLine
+}
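`GetFilePositionFor` exists because generated files can carry `//line` directives that remap positions into non-Go sources (templates, grammars), while issue reporting wants the real .go location. A runnable illustration of adjusted vs. unadjusted positions:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

const src = `package main

//line query.sql:10
var x = 1
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "gen.go", src, 0)
	if err != nil {
		panic(err)
	}
	pos := f.Decls[0].Pos()
	adj := fset.PositionFor(pos, true)   // honors the //line directive
	raw := fset.PositionFor(pos, false)  // the real location in the Go file
	fmt.Printf("%s:%d\n", adj.Filename, adj.Line) // query.sql:10
	fmt.Printf("%s:%d\n", raw.Filename, raw.Line) // gen.go:4
}
```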
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
index ac03c71ecc..3a8652486c 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
@@ -42,6 +42,7 @@ type Diagnostic struct {
Analyzer *analysis.Analyzer
Position token.Position
Pkg *packages.Package
+ File *token.File
}
type runner struct {
@@ -121,9 +122,9 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package,
}
act = actAlloc.alloc()
- act.a = a
- act.pkg = pkg
- act.r = r
+ act.Analyzer = a
+ act.Package = pkg
+ act.runner = r
act.isInitialPkg = initialPkgs[pkg]
act.needAnalyzeSource = initialPkgs[pkg]
act.analysisDoneCh = make(chan struct{})
@@ -132,11 +133,11 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package,
if len(a.FactTypes) > 0 {
depsCount += len(pkg.Imports)
}
- act.deps = make([]*action, 0, depsCount)
+ act.Deps = make([]*action, 0, depsCount)
	// Add a dependency on each required analyzer.
for _, req := range a.Requires {
- act.deps = append(act.deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc))
+ act.Deps = append(act.Deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc))
}
r.buildActionFactDeps(act, a, pkg, initialPkgs, actions, actAlloc)
@@ -162,7 +163,7 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *pac
sort.Strings(paths) // for determinism
for _, path := range paths {
dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc)
- act.deps = append(act.deps, dep)
+ act.Deps = append(act.Deps, dep)
}
// Need to register fact types for pkgcache proper gob encoding.
@@ -203,7 +204,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package,
for _, a := range analyzers {
for _, pkg := range pkgs {
root := r.makeAction(a, pkg, initialPkgs, actions, actAlloc)
- root.isroot = true
+ root.IsRoot = true
roots = append(roots, root)
}
}
@@ -220,7 +221,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze
actionPerPkg := map[*packages.Package][]*action{}
for _, act := range actions {
- actionPerPkg[act.pkg] = append(actionPerPkg[act.pkg], act)
+ actionPerPkg[act.Package] = append(actionPerPkg[act.Package], act)
}
// Fill Imports field.
@@ -250,7 +251,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze
}
}
for _, act := range actions {
- dfs(act.pkg)
+ dfs(act.Package)
}
// Limit memory and IO usage.
@@ -282,7 +283,7 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err
for _, act := range actions {
if !extracted[act] {
extracted[act] = true
- visitAll(act.deps)
+ visitAll(act.Deps)
extract(act)
}
}
@@ -299,31 +300,34 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err
seen := make(map[key]bool)
extract = func(act *action) {
- if act.err != nil {
- if pe, ok := act.err.(*errorutil.PanicError); ok {
+ if act.Err != nil {
+ if pe, ok := act.Err.(*errorutil.PanicError); ok {
panic(pe)
}
- retErrors = append(retErrors, fmt.Errorf("%s: %w", act.a.Name, act.err))
+ retErrors = append(retErrors, fmt.Errorf("%s: %w", act.Analyzer.Name, act.Err))
return
}
- if act.isroot {
- for _, diag := range act.diagnostics {
+ if act.IsRoot {
+ for _, diag := range act.Diagnostics {
// We don't display a.Name/f.Category
// as most users don't care.
- posn := act.pkg.Fset.Position(diag.Pos)
- k := key{posn, act.a, diag.Message}
+ position := GetFilePositionFor(act.Package.Fset, diag.Pos)
+ file := act.Package.Fset.File(diag.Pos)
+
+ k := key{Position: position, Analyzer: act.Analyzer, message: diag.Message}
if seen[k] {
continue // duplicate
}
seen[k] = true
retDiag := Diagnostic{
+ File: file,
Diagnostic: diag,
- Analyzer: act.a,
- Position: posn,
- Pkg: act.pkg,
+ Analyzer: act.Analyzer,
+ Position: position,
+ Pkg: act.Package,
}
retDiags = append(retDiags, retDiag)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
index 152cab1814..2e1c414228 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
@@ -29,8 +29,8 @@ func (actAlloc *actionAllocator) alloc() *action {
}
func (act *action) waitUntilDependingAnalyzersWorked() {
- for _, dep := range act.deps {
- if dep.pkg == act.pkg {
+ for _, dep := range act.Deps {
+ if dep.Package == act.Package {
<-dep.analysisDoneCh
}
}
@@ -39,26 +39,26 @@ func (act *action) waitUntilDependingAnalyzersWorked() {
func (act *action) analyzeSafe() {
defer func() {
if p := recover(); p != nil {
- if !act.isroot {
+ if !act.IsRoot {
// This line allows displaying "hidden" panics from analyzers like buildssa.
// Some linters depend on sub-analyzers, but when a sub-analyzer fails the linter is not aware of that;
// this results in another panic (e.g. "interface conversion: interface {} is nil, not *buildssa.SSA").
- act.r.log.Errorf("%s: panic during analysis: %v, %s", act.a.Name, p, string(debug.Stack()))
+ act.runner.log.Errorf("%s: panic during analysis: %v, %s", act.Analyzer.Name, p, string(debug.Stack()))
}
- act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s",
- act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack())
+ act.Err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s",
+ act.Analyzer.Name, act.Package.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack())
}
}()
- act.r.sw.TrackStage(act.a.Name, act.analyze)
+ act.runner.sw.TrackStage(act.Analyzer.Name, act.analyze)
}
func (act *action) markDepsForAnalyzingSource() {
// Horizontal deps (analyzer.Requires) must be loaded from source and analyzed before analyzing
// this action.
- for _, dep := range act.deps {
- if dep.pkg == act.pkg {
+ for _, dep := range act.Deps {
+ if dep.Package == act.Package {
// Analyze source only for horizontal dependencies, e.g. from "buildssa".
dep.needAnalyzeSource = true // can't be set in parallel
}
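
markDepsForAnalyzingSource only flags horizontal edges: dependencies within the same package, whose in-memory results feed the dependent analyzer. Vertical edges (same analyzer, imported package) exchange serialized facts instead. A rough sketch of that classification, with stand-in types:

package sketch

// action is an illustrative stand-in for the vendored action graph node.
type action struct {
	Analyzer string
	Package  string
	Deps     []*action
}

// classify splits dependency edges the way the runner reasons about them:
// horizontal edges share the package (results flow in memory), vertical
// edges share the analyzer (facts flow via serialization).
func classify(act *action) (horizontal, vertical []*action) {
	for _, dep := range act.Deps {
		switch {
		case dep.Package == act.Package:
			horizontal = append(horizontal, dep) // e.g. a buildssa result reused in memory
		case dep.Analyzer == act.Analyzer:
			vertical = append(vertical, dep) // facts inherited from an imported package
		}
	}
	return horizontal, vertical
}
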
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
index fbc2f82fa9..e06ea2979c 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
@@ -26,7 +26,7 @@ func (act *action) loadCachedFacts() bool {
return true // load cached facts only for non-initial packages
}
- if len(act.a.FactTypes) == 0 {
+ if len(act.Analyzer.FactTypes) == 0 {
return true // no need to load facts
}
@@ -38,7 +38,7 @@ func (act *action) loadCachedFacts() bool {
}
func (act *action) persistFactsToCache() error {
- analyzer := act.a
+ analyzer := act.Analyzer
if len(analyzer.FactTypes) == 0 {
return nil
}
@@ -46,7 +46,7 @@ func (act *action) persistFactsToCache() error {
// Merge new facts into the package and persist them.
var facts []Fact
for key, fact := range act.packageFacts {
- if key.pkg != act.pkg.Types {
+ if key.pkg != act.Package.Types {
// The fact is from inherited facts from another package
continue
}
@@ -57,7 +57,7 @@ func (act *action) persistFactsToCache() error {
}
for key, fact := range act.objectFacts {
obj := key.obj
- if obj.Pkg() != act.pkg.Types {
+ if obj.Pkg() != act.Package.Types {
// The fact is from inherited facts from another package
continue
}
@@ -74,33 +74,33 @@ func (act *action) persistFactsToCache() error {
})
}
- factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
+ factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name)
- return act.r.pkgCache.Put(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts)
+ return act.runner.pkgCache.Put(act.Package, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts)
}
func (act *action) loadPersistedFacts() bool {
var facts []Fact
- err := act.r.pkgCache.Get(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(act.a), &facts)
+ err := act.runner.pkgCache.Get(act.Package, cache.HashModeNeedAllDeps, factCacheKey(act.Analyzer), &facts)
if err != nil {
if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) {
- act.r.log.Warnf("Failed to get persisted facts: %s", err)
+ act.runner.log.Warnf("Failed to get persisted facts: %s", err)
}
- factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name)
+ factsCacheDebugf("No cached facts for package %q and analyzer %s", act.Package.Name, act.Analyzer.Name)
return false
}
- factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
+ factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name)
for _, f := range facts {
if f.Path == "" { // this is a package fact
- key := packageFactKey{act.pkg.Types, act.factType(f.Fact)}
+ key := packageFactKey{act.Package.Types, act.factType(f.Fact)}
act.packageFacts[key] = f.Fact
continue
}
- obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path))
+ obj, err := objectpath.Object(act.Package.Types, objectpath.Path(f.Path))
if err != nil {
// Be lenient about these errors. For example, when
// analyzing io/ioutil from source, we may get a fact
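
Facts are persisted by object path rather than by pointer: the objectpath package from golang.org/x/tools turns a types.Object into a stable, package-relative string that can be resolved again after the package is re-loaded, which is why the loader above tolerates resolution failures (cgo-synthesized objects, for instance). A rough sketch of the round-trip:

package sketch

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

// encodeObjectRef turns an object into a package-relative path that
// survives re-loading the package in another process.
func encodeObjectRef(obj types.Object) (objectpath.Path, error) {
	return objectpath.For(obj)
}

// decodeObjectRef resolves the path inside a (possibly re-loaded)
// *types.Package; this can fail, e.g. for objects synthesized by cgo,
// so callers should be lenient.
func decodeObjectRef(pkg *types.Package, p objectpath.Path) (types.Object, error) {
	obj, err := objectpath.Object(pkg, p)
	if err != nil {
		return nil, fmt.Errorf("resolving %q in %s: %w", p, pkg.Path(), err)
	}
	return obj, nil
}
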
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go
similarity index 52%
rename from hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go
rename to hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go
index d868f8f5da..376a37f039 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
-// Partial copy of https://github.com/golang/tools/blob/dba5486c2a1d03519930812112b23ed2c45c04fc/go/analysis/internal/checker/checker.go
+// Altered copy of https://github.com/golang/tools/blob/v0.28.0/go/analysis/internal/checker/checker.go
package goanalysis
@@ -18,6 +18,8 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
+ "github.com/golangci/golangci-lint/internal/x/tools/analysisflags"
+ "github.com/golangci/golangci-lint/internal/x/tools/analysisinternal"
"github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors"
)
@@ -27,19 +29,21 @@ import (
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
type action struct {
- a *analysis.Analyzer
- pkg *packages.Package
+ Analyzer *analysis.Analyzer
+ Package *packages.Package
+ IsRoot bool // whether this is a root node of the graph
+ Deps []*action
+ Result any // computed result of Analyzer.run, if any (and if IsRoot)
+ Err error // error result of Analyzer.run
+ Diagnostics []analysis.Diagnostic
+ Duration time.Duration // execution time of this step
+
pass *analysis.Pass
- isroot bool
- deps []*action
objectFacts map[objectFactKey]analysis.Fact
packageFacts map[packageFactKey]analysis.Fact
- result any
- diagnostics []analysis.Diagnostic
- err error
// NOTE(ldez) custom fields.
- r *runner
+ runner *runner
analysisDoneCh chan struct{}
loadCachedFactsDone bool
loadCachedFactsOk bool
@@ -61,7 +65,7 @@ type packageFactKey struct {
// NOTE(ldez) no alteration.
func (act *action) String() string {
- return fmt.Sprintf("%s@%s", act.a, act.pkg)
+ return fmt.Sprintf("%s@%s", act.Analyzer, act.Package)
}
// NOTE(ldez) altered version of `func (act *action) execOnce()`.
@@ -72,20 +76,26 @@ func (act *action) analyze() {
return
}
- defer func(now time.Time) {
- analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now))
- }(time.Now())
+ // Record time spent in this node but not its dependencies.
+ // In parallel mode, due to GC/scheduler contention, the
+ // time is 5x higher than in sequential mode, even with a
+ // semaphore limiting the number of threads here.
+ // So use -debug=tp.
+ t0 := time.Now()
+ defer func() {
+ act.Duration = time.Since(t0)
+ analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.runner.prefix, act.Analyzer.Name, act.Package.Name, time.Since(t0))
+ }()
// Report an error if any dependency failures.
var depErrors error
- for _, dep := range act.deps {
- if dep.err != nil {
- depErrors = errors.Join(depErrors, errors.Unwrap(dep.err))
+ for _, dep := range act.Deps {
+ if dep.Err != nil {
+ depErrors = errors.Join(depErrors, errors.Unwrap(dep.Err))
}
}
-
if depErrors != nil {
- act.err = fmt.Errorf("failed prerequisites: %w", depErrors)
+ act.Err = fmt.Errorf("failed prerequisites: %w", depErrors)
return
}
@@ -94,15 +104,14 @@ func (act *action) analyze() {
inputs := make(map[*analysis.Analyzer]any)
act.objectFacts = make(map[objectFactKey]analysis.Fact)
act.packageFacts = make(map[packageFactKey]analysis.Fact)
- startedAt := time.Now()
-
- for _, dep := range act.deps {
- if dep.pkg == act.pkg {
+ for _, dep := range act.Deps {
+ if dep.Package == act.Package {
// Same package, different analysis (horizontal edge):
// in-memory outputs of prerequisite analyzers
// become inputs to this analysis pass.
- inputs[dep.a] = dep.result
- } else if dep.a == act.a { // (always true)
+ inputs[dep.Analyzer] = dep.Result
+
+ } else if dep.Analyzer == act.Analyzer { // (always true)
// Same analysis, different package (vertical edge):
// serialized facts produced by prerequisite analysis
// become available to this analysis pass.
@@ -110,10 +119,20 @@ func (act *action) analyze() {
}
}
- factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt))
+ // NOTE(ldez) this is not compatible with our implementation.
+ // Quick (nonexhaustive) check that the correct go/packages mode bits were used.
+ // (If there were errors, all bets are off.)
+ // if pkg := act.Package; pkg.Errors == nil {
+ // if pkg.Name == "" || pkg.PkgPath == "" || pkg.Types == nil || pkg.Fset == nil || pkg.TypesSizes == nil {
+ // panic(fmt.Sprintf("packages must be loaded with packages.LoadSyntax mode: Name: %v, PkgPath: %v, Types: %v, Fset: %v, TypesSizes: %v",
+ // pkg.Name == "", pkg.PkgPath == "", pkg.Types == nil, pkg.Fset == nil, pkg.TypesSizes == nil))
+ // }
+ // }
+
+ factsDebugf("%s: Inherited facts in %s", act, time.Since(t0))
module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers.
- if mod := act.pkg.Module; mod != nil {
+ if mod := act.Package.Module; mod != nil {
module.Path = mod.Path
module.Version = mod.Version
module.GoVersion = mod.GoVersion
@@ -121,79 +140,104 @@ func (act *action) analyze() {
// Run the analysis.
pass := &analysis.Pass{
- Analyzer: act.a,
- Fset: act.pkg.Fset,
- Files: act.pkg.Syntax,
- OtherFiles: act.pkg.OtherFiles,
- IgnoredFiles: act.pkg.IgnoredFiles,
- Pkg: act.pkg.Types,
- TypesInfo: act.pkg.TypesInfo,
- TypesSizes: act.pkg.TypesSizes,
- TypeErrors: act.pkg.TypeErrors,
+ Analyzer: act.Analyzer,
+ Fset: act.Package.Fset,
+ Files: act.Package.Syntax,
+ OtherFiles: act.Package.OtherFiles,
+ IgnoredFiles: act.Package.IgnoredFiles,
+ Pkg: act.Package.Types,
+ TypesInfo: act.Package.TypesInfo,
+ TypesSizes: act.Package.TypesSizes,
+ TypeErrors: act.Package.TypeErrors,
Module: module,
ResultOf: inputs,
- Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
- ImportObjectFact: act.importObjectFact,
+ Report: func(d analysis.Diagnostic) { act.Diagnostics = append(act.Diagnostics, d) },
+ ImportObjectFact: act.ObjectFact,
ExportObjectFact: act.exportObjectFact,
- ImportPackageFact: act.importPackageFact,
+ ImportPackageFact: act.PackageFact,
ExportPackageFact: act.exportPackageFact,
- AllObjectFacts: act.allObjectFacts,
- AllPackageFacts: act.allPackageFacts,
+ AllObjectFacts: act.AllObjectFacts,
+ AllPackageFacts: act.AllPackageFacts,
}
-
+ pass.ReadFile = analysisinternal.MakeReadFile(pass)
act.pass = pass
- act.r.passToPkgGuard.Lock()
- act.r.passToPkg[pass] = act.pkg
- act.r.passToPkgGuard.Unlock()
- if act.pkg.IllTyped {
+ act.runner.passToPkgGuard.Lock()
+ act.runner.passToPkg[pass] = act.Package
+ act.runner.passToPkgGuard.Unlock()
+
+ act.Result, act.Err = func() (any, error) {
+ // NOTE(golangci-lint):
// It looks like there should be !pass.Analyzer.RunDespiteErrors
- // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here,
+ // but govet's cgocall crashes on it.
+ // Govet itself contains !pass.Analyzer.RunDespiteErrors condition here,
// but it exits before it if packages.Load have failed.
- act.err = fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.pkg})
- } else {
- startedAt = time.Now()
+ if act.Package.IllTyped {
+ return nil, fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.Package})
+ }
+
+ t1 := time.Now()
- act.result, act.err = pass.Analyzer.Run(pass)
+ result, err := pass.Analyzer.Run(pass)
+ if err != nil {
+ return nil, err
+ }
- analyzedIn := time.Since(startedAt)
- if analyzedIn > time.Millisecond*10 {
+ analyzedIn := time.Since(t1)
+ if analyzedIn > 10*time.Millisecond {
debugf("%s: run analyzer in %s", act, analyzedIn)
}
- }
- // disallow calls after Run
+ // correct result type?
+ if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want {
+ return nil, fmt.Errorf(
+ "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
+ pass.Pkg.Path(), pass.Analyzer, got, want)
+ }
+
+ // resolve diagnostic URLs
+ for i := range act.Diagnostics {
+ url, err := analysisflags.ResolveURL(act.Analyzer, act.Diagnostics[i])
+ if err != nil {
+ return nil, err
+ }
+ act.Diagnostics[i].URL = url
+ }
+ return result, nil
+ }()
+
+ // Help detect (disallowed) calls after Run.
pass.ExportObjectFact = nil
pass.ExportPackageFact = nil
err := act.persistFactsToCache()
if err != nil {
- act.r.log.Warnf("Failed to persist facts to cache: %s", err)
+ act.runner.log.Warnf("Failed to persist facts to cache: %s", err)
}
}
-// NOTE(ldez) altered: logger; serialize.
+// NOTE(ldez) altered: logger; sanityCheck.
// inheritFacts populates act.facts with
// those it obtains from its dependency, dep.
func inheritFacts(act, dep *action) {
- const serialize = false
+ const sanityCheck = false
for key, fact := range dep.objectFacts {
// Filter out facts related to objects
// that are irrelevant downstream
// (equivalently: not in the compiler export data).
- if !exportedFrom(key.obj, dep.pkg.Types) {
+ if !exportedFrom(key.obj, dep.Package.Types) {
factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
continue
}
// Optionally serialize/deserialize fact
// to verify that it works across address spaces.
- if serialize {
+ if sanityCheck {
encodedFact, err := codeFact(fact)
if err != nil {
- act.r.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
+ act.runner.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
}
fact = encodedFact
}
@@ -207,14 +251,26 @@ func inheritFacts(act, dep *action) {
// TODO: filter out facts that belong to
// packages not mentioned in the export data
// to prevent side channels.
+ //
+ // The Pass.All{Object,Package}Facts accessors expose too much:
+ // all facts, of all types, for all dependencies in the action
+ // graph. Not only does the representation grow quadratically,
+ // but it violates the separate compilation paradigm, allowing
+ // analysis implementations to communicate with indirect
+ // dependencies that are not mentioned in the export data.
+ //
+ // It's not clear how to fix this short of a rather expensive
+ // filtering step after each action that enumerates all the
+ // objects that would appear in export data, and deletes
+ // facts associated with objects not in this set.
// Optionally serialize/deserialize fact
// to verify that it works across address spaces
// and is deterministic.
- if serialize {
+ if sanityCheck {
encodedFact, err := codeFact(fact)
if err != nil {
- act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
+ act.runner.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
}
fact = encodedFact
}
@@ -225,7 +281,7 @@ func inheritFacts(act, dep *action) {
}
}
-// NOTE(ldez) no alteration.
+// NOTE(ldez) altered: `new` is renamed to `newFact`.
// codeFact encodes then decodes a fact,
// just to exercise that logic.
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
@@ -259,7 +315,7 @@ func codeFact(fact analysis.Fact) (analysis.Fact, error) {
// This includes not just the exported members of pkg, but also unexported
// constants, types, fields, and methods, perhaps belonging to other packages,
// that find their way into the API.
-// This is an over-approximation of the more accurate approach used by
+// This is an overapproximation of the more accurate approach used by
// gc export data, which walks the type graph, but it's much simpler.
//
// TODO(adonovan): do more accurate filtering by walking the type graph.
@@ -282,11 +338,14 @@ func exportedFrom(obj types.Object, pkg *types.Package) bool {
return false // Nil, Builtin, Label, or PkgName
}
-// NOTE(ldez) altered: logger; `act.factType`
-// importObjectFact implements Pass.ImportObjectFact.
-// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
-// importObjectFact copies the fact value to *ptr.
-func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
+// NOTE(ldez) altered: logger; `act.factType`.
+// ObjectFact retrieves a fact associated with obj,
+// and returns true if one was found.
+// Given a value ptr of type *T, where *T satisfies Fact,
+// ObjectFact copies the value to *ptr.
+//
+// See documentation at ImportObjectFact field of [analysis.Pass].
+func (act *action) ObjectFact(obj types.Object, ptr analysis.Fact) bool {
if obj == nil {
panic("nil object")
}
@@ -298,12 +357,16 @@ func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
return false
}
-// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`.
+// NOTE(ldez) altered: logger; `act.factType`.
// exportObjectFact implements Pass.ExportObjectFact.
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
- if obj.Pkg() != act.pkg.Types {
- act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
- act.a, act.pkg, obj, fact)
+ if act.pass.ExportObjectFact == nil {
+ act.runner.log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
+ }
+
+ if obj.Pkg() != act.Package.Types {
+ act.runner.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
+ act.Analyzer, act.Package, obj, fact)
}
key := objectFactKey{obj, act.factType(fact)}
@@ -312,12 +375,16 @@ func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
objstr := types.ObjectString(obj, (*types.Package).Name)
factsExportDebugf("%s: object %s has fact %s\n",
- act.pkg.Fset.Position(obj.Pos()), objstr, fact)
+ act.Package.Fset.Position(obj.Pos()), objstr, fact)
}
}
// NOTE(ldez) no alteration.
-func (act *action) allObjectFacts() []analysis.ObjectFact {
+// AllObjectFacts returns a new slice containing all object facts of
+// the analysis's FactTypes in unspecified order.
+//
+// See documentation at AllObjectFacts field of [analysis.Pass].
+func (act *action) AllObjectFacts() []analysis.ObjectFact {
facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
for k := range act.objectFacts {
facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
@@ -325,11 +392,12 @@ func (act *action) allObjectFacts() []analysis.ObjectFact {
return facts
}
-// NOTE(ldez) altered: `act.factType`
-// importPackageFact implements Pass.ImportPackageFact.
-// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
-// fact copies the fact value to *ptr.
-func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
+// NOTE(ldez) altered: `act.factType`.
+// PackageFact retrieves a fact associated with package pkg,
+// which must be this package or one of its dependencies.
+//
+// See documentation at ImportObjectFact field of [analysis.Pass].
+func (act *action) PackageFact(pkg *types.Package, ptr analysis.Fact) bool {
if pkg == nil {
panic("nil package")
}
@@ -341,30 +409,38 @@ func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool
return false
}
-// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`.
+// NOTE(ldez) altered: logger; `act.factType`.
// exportPackageFact implements Pass.ExportPackageFact.
func (act *action) exportPackageFact(fact analysis.Fact) {
+ if act.pass.ExportPackageFact == nil {
+ act.runner.log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
+ }
+
key := packageFactKey{act.pass.Pkg, act.factType(fact)}
act.packageFacts[key] = fact // clobber any existing entry
factsDebugf("%s: package %s has fact %s\n",
- act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
+ act.Package.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
}
// NOTE(ldez) altered: add receiver to handle logs.
func (act *action) factType(fact analysis.Fact) reflect.Type {
t := reflect.TypeOf(fact)
if t.Kind() != reflect.Ptr {
- act.r.log.Fatalf("invalid Fact type: got %T, want pointer", fact)
+ act.runner.log.Fatalf("invalid Fact type: got %T, want pointer", fact)
}
return t
}
// NOTE(ldez) no alteration.
-func (act *action) allPackageFacts() []analysis.PackageFact {
+// AllPackageFacts returns a new slice containing all package
+// facts of the analysis's FactTypes in unspecified order.
+//
+// See documentation at AllPackageFacts field of [analysis.Pass].
+func (act *action) AllPackageFacts() []analysis.PackageFact {
facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
- for k := range act.packageFacts {
- facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
+ for k, fact := range act.packageFacts {
+ facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: fact})
}
return facts
}
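
The sanityCheck constant gates a debug-only round-trip (codeFact) proving that a fact survives gob encoding, i.e. that it could cross an address-space boundary in a modular driver. A standalone sketch of such a round-trip, with a hypothetical fact type:

package sketch

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"golang.org/x/tools/go/analysis"
)

// isPureFact is a hypothetical fact type standing in for a linter's own.
type isPureFact struct{ Pure bool }

func (*isPureFact) AFact() {}

var _ analysis.Fact = (*isPureFact)(nil) // facts must implement analysis.Fact

// roundTrip encodes then decodes a fact, mimicking what sanityCheck
// verifies: the fact is gob-serializable and usable across address spaces.
func roundTrip(fact *isPureFact) (*isPureFact, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
		return nil, fmt.Errorf("encode: %w", err)
	}
	decoded := new(isPureFact)
	if err := gob.NewDecoder(&buf).Decode(decoded); err != nil {
		return nil, fmt.Errorf("decode: %w", err)
	}
	return decoded, nil
}
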
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
index 44d6769586..fca4b8c3ad 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
@@ -67,7 +67,7 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) {
// Unblock depending on actions and propagate error.
for _, act := range lp.actions {
close(act.analysisDoneCh)
- act.err = werr
+ act.Err = werr
}
return
}
@@ -125,13 +125,14 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error {
pkg.IllTyped = true
pkg.TypesInfo = &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Instances: make(map[*ast.Ident]types.Instance),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Instances: make(map[*ast.Ident]types.Instance),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ Scopes: make(map[ast.Node]*types.Scope),
+ FileVersions: make(map[*ast.File]string),
}
importer := func(path string) (*types.Package, error) {
@@ -363,12 +364,12 @@ func (lp *loadingPackage) decUse(canClearTypes bool) {
pass.ImportPackageFact = nil
pass.ExportPackageFact = nil
act.pass = nil
- act.deps = nil
- if act.result != nil {
+ act.Deps = nil
+ if act.Result != nil {
if isMemoryDebug {
- debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.result))
+ debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.Result))
}
- act.result = nil
+ act.Result = nil
}
}
@@ -399,7 +400,7 @@ func (lp *loadingPackage) decUse(canClearTypes bool) {
for _, act := range lp.actions {
if !lp.isInitial {
- act.pkg = nil
+ act.Package = nil
}
act.packageFacts = nil
act.objectFacts = nil
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
index a9aee03a2b..3a9a35dec1 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
@@ -2,6 +2,8 @@ package goanalysis
import (
"fmt"
+ "go/token"
+ "strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
@@ -81,6 +83,7 @@ func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Iss
func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue {
var issues []result.Issue
+
for i := range diags {
diag := &diags[i]
linterName := linterNameBuilder(diag)
@@ -92,11 +95,43 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st
text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message)
}
+ var suggestedFixes []analysis.SuggestedFix
+
+ for _, sf := range diag.SuggestedFixes {
+ // Skip suggested fixes on cgo files.
+ // The related error is: "diff has out-of-bounds edits"
+ // This is a temporary workaround.
+ if !strings.HasSuffix(diag.File.Name(), ".go") {
+ continue
+ }
+
+ nsf := analysis.SuggestedFix{Message: sf.Message}
+
+ for _, edit := range sf.TextEdits {
+ end := edit.End
+
+ if !end.IsValid() {
+ end = edit.Pos
+ }
+
+ // To be applied, the positions need to be "adjusted" relative to the file.
+ // This is the difference between the "displayed" positions and "effective" positions.
+ nsf.TextEdits = append(nsf.TextEdits, analysis.TextEdit{
+ Pos: token.Pos(diag.File.Offset(edit.Pos)),
+ End: token.Pos(diag.File.Offset(end)),
+ NewText: edit.NewText,
+ })
+ }
+
+ suggestedFixes = append(suggestedFixes, nsf)
+ }
+
issues = append(issues, result.Issue{
- FromLinter: linterName,
- Text: text,
- Pos: diag.Position,
- Pkg: diag.Pkg,
+ FromLinter: linterName,
+ Text: text,
+ Pos: diag.Position,
+ Pkg: diag.Pkg,
+ SuggestedFixes: suggestedFixes,
})
if len(diag.Related) > 0 {
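
The suggested-fix handling above rewrites each TextEdit's fileset-relative token.Pos into a byte offset within its file, since that is what a later fix applier indexes with. A small sketch of the conversion, assuming pos was obtained from this same file:

package sketch

import "go/token"

// toFileOffset rewrites a fileset-relative token.Pos as a byte offset
// within its own file, re-wrapped as token.Pos the way the vendored
// code stores it on the adjusted TextEdit.
func toFileOffset(file *token.File, pos token.Pos) token.Pos {
	// file.Offset requires pos to be a valid position within file;
	// callers must pass a pos belonging to this file.
	return token.Pos(file.Offset(pos))
}
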
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
index 8c244688b4..4366155b02 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
@@ -48,7 +48,7 @@ func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.
Severity: i.Severity,
Pos: i.Pos,
LineRange: i.LineRange,
- Replacement: i.Replacement,
+ SuggestedFixes: i.SuggestedFixes,
ExpectNoLint: i.ExpectNoLint,
ExpectedNoLintLinter: i.ExpectedNoLintLinter,
})
@@ -123,7 +123,7 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
Severity: issue.Severity,
Pos: issue.Pos,
LineRange: issue.LineRange,
- Replacement: issue.Replacement,
+ SuggestedFixes: issue.SuggestedFixes,
Pkg: pkg,
ExpectNoLint: issue.ExpectNoLint,
ExpectedNoLintLinter: issue.ExpectedNoLintLinter,
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go
new file mode 100644
index 0000000000..c8953ad3bf
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go
@@ -0,0 +1,6 @@
+package goformatters
+
+type Formatter interface {
+ Name() string
+ Format(filename string, src []byte) ([]byte, error)
+}
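
The new Formatter interface is deliberately tiny: a name plus a pure bytes-in/bytes-out transform. A quick illustration of driving it; noopFormatter is hypothetical, only the interface shape comes from the vendored file:

package main

import "fmt"

// Formatter mirrors the vendored goformatters.Formatter interface.
type Formatter interface {
	Name() string
	Format(filename string, src []byte) ([]byte, error)
}

// noopFormatter is a stand-in implementation for the example.
type noopFormatter struct{}

func (noopFormatter) Name() string { return "noop" }

func (noopFormatter) Format(_ string, src []byte) ([]byte, error) { return src, nil }

func main() {
	var f Formatter = noopFormatter{}
	out, err := f.Format("main.go", []byte("package main\n"))
	if err != nil {
		fmt.Println(f.Name(), "failed:", err)
		return
	}
	fmt.Printf("%s produced %d bytes\n", f.Name(), len(out))
}
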
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go
new file mode 100644
index 0000000000..bae39f3cee
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go
@@ -0,0 +1,48 @@
+package gci
+
+import (
+ gcicfg "github.com/daixiang0/gci/pkg/config"
+ "github.com/daixiang0/gci/pkg/gci"
+ "github.com/ldez/grignotin/gomod"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+)
+
+const Name = "gci"
+
+type Formatter struct {
+ config *gcicfg.Config
+}
+
+func New(cfg config.GciSettings) (*Formatter, error) {
+ modPath, err := gomod.GetModulePath()
+ if err != nil {
+ return nil, err
+ }
+
+ parsedCfg, err := gcicfg.YamlConfig{
+ Cfg: gcicfg.BoolConfig{
+ NoInlineComments: cfg.NoInlineComments,
+ NoPrefixComments: cfg.NoPrefixComments,
+ SkipGenerated: cfg.SkipGenerated,
+ CustomOrder: cfg.CustomOrder,
+ NoLexOrder: cfg.NoLexOrder,
+ },
+ SectionStrings: cfg.Sections,
+ ModPath: modPath,
+ }.Parse()
+ if err != nil {
+ return nil, err
+ }
+
+ return &Formatter{config: parsedCfg}, nil
+}
+
+func (*Formatter) Name() string {
+ return Name
+}
+
+func (f *Formatter) Format(filename string, src []byte) ([]byte, error) {
+ _, formatted, err := gci.LoadFormat(src, filename, *f.config)
+ return formatted, err
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go
new file mode 100644
index 0000000000..fe2e355590
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go
@@ -0,0 +1,35 @@
+package gofmt
+
+import (
+ "github.com/golangci/gofmt/gofmt"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+)
+
+const Name = "gofmt"
+
+type Formatter struct {
+ options gofmt.Options
+}
+
+func New(cfg config.GoFmtSettings) *Formatter {
+ var rewriteRules []gofmt.RewriteRule
+ for _, rule := range cfg.RewriteRules {
+ rewriteRules = append(rewriteRules, gofmt.RewriteRule(rule))
+ }
+
+ return &Formatter{
+ options: gofmt.Options{
+ NeedSimplify: cfg.Simplify,
+ RewriteRules: rewriteRules,
+ },
+ }
+}
+
+func (*Formatter) Name() string {
+ return Name
+}
+
+func (f *Formatter) Format(filename string, src []byte) ([]byte, error) {
+ return gofmt.Source(filename, src, f.options)
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go
new file mode 100644
index 0000000000..f4605e2333
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go
@@ -0,0 +1,43 @@
+package gofumpt
+
+import (
+ "strings"
+
+ gofumpt "mvdan.cc/gofumpt/format"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+)
+
+const Name = "gofumpt"
+
+type Formatter struct {
+ options gofumpt.Options
+}
+
+func New(cfg config.GofumptSettings, goVersion string) *Formatter {
+ return &Formatter{
+ options: gofumpt.Options{
+ LangVersion: getLangVersion(goVersion),
+ ModulePath: cfg.ModulePath,
+ ExtraRules: cfg.ExtraRules,
+ },
+ }
+}
+
+func (*Formatter) Name() string {
+ return Name
+}
+
+func (f *Formatter) Format(_ string, src []byte) ([]byte, error) {
+ return gofumpt.Source(src, f.options)
+}
+
+// modified copy of pkg/golinters/gofumpt/gofumpt.go
+func getLangVersion(v string) string {
+ if v == "" {
+ // TODO: this defaults to "1.15"; the fallback must be removed in the future (v2).
+ return "go1.15"
+ }
+
+ return "go" + strings.TrimPrefix(v, "go")
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go
new file mode 100644
index 0000000000..add9eb3a61
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go
@@ -0,0 +1,22 @@
+package goimports
+
+import (
+ "golang.org/x/tools/imports"
+)
+
+const Name = "goimports"
+
+type Formatter struct{}
+
+func New() *Formatter {
+ return &Formatter{}
+}
+
+func (*Formatter) Name() string {
+ return Name
+}
+
+func (*Formatter) Format(filename string, src []byte) ([]byte, error) {
+ // The `imports.LocalPrefix` (`settings.LocalPrefixes`) is a global var.
+ return imports.Process(filename, src, nil)
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go
new file mode 100644
index 0000000000..e07b3aad80
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go
@@ -0,0 +1,74 @@
+package goformatters
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+ "github.com/golangci/golangci-lint/pkg/goformatters/gci"
+ "github.com/golangci/golangci-lint/pkg/goformatters/gofmt"
+ "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt"
+ "github.com/golangci/golangci-lint/pkg/goformatters/goimports"
+ "github.com/golangci/golangci-lint/pkg/lint/linter"
+ "github.com/golangci/golangci-lint/pkg/logutils"
+)
+
+type MetaFormatter struct {
+ log logutils.Log
+ formatters []Formatter
+}
+
+func NewMetaFormatter(log logutils.Log, cfg *config.Config, enabledLinters map[string]*linter.Config) (*MetaFormatter, error) {
+ m := &MetaFormatter{log: log}
+
+ if _, ok := enabledLinters[gofmt.Name]; ok {
+ m.formatters = append(m.formatters, gofmt.New(cfg.LintersSettings.Gofmt))
+ }
+
+ if _, ok := enabledLinters[gofumpt.Name]; ok {
+ m.formatters = append(m.formatters, gofumpt.New(cfg.LintersSettings.Gofumpt, cfg.Run.Go))
+ }
+
+ if _, ok := enabledLinters[goimports.Name]; ok {
+ m.formatters = append(m.formatters, goimports.New())
+ }
+
+ // gci runs last because its only goal is to handle imports.
+ if _, ok := enabledLinters[gci.Name]; ok {
+ formatter, err := gci.New(cfg.LintersSettings.Gci)
+ if err != nil {
+ return nil, fmt.Errorf("gci: creating formatter: %w", err)
+ }
+
+ m.formatters = append(m.formatters, formatter)
+ }
+
+ return m, nil
+}
+
+func (m *MetaFormatter) Format(filename string, src []byte) []byte {
+ if len(m.formatters) == 0 {
+ data, err := format.Source(src)
+ if err != nil {
+ m.log.Warnf("(fmt) formatting file %s: %v", filename, err)
+ return src
+ }
+
+ return data
+ }
+
+ data := bytes.Clone(src)
+
+ for _, formatter := range m.formatters {
+ formatted, err := formatter.Format(filename, data)
+ if err != nil {
+ m.log.Warnf("(%s) formatting file %s: %v", formatter.Name(), filename, err)
+ continue
+ }
+
+ data = formatted
+ }
+
+ return data
+}
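
MetaFormatter.Format applies the enabled formatters in sequence, keeps the previous output when one fails, and falls back to go/format when none are enabled. A stripped-down sketch of just that pipeline semantics:

package main

import (
	"bytes"
	"fmt"
	"go/format"
)

type step func(src []byte) ([]byte, error)

// chain mirrors MetaFormatter.Format: each step runs on the previous
// output; a failing step is skipped rather than aborting the pipeline.
func chain(src []byte, steps ...step) []byte {
	if len(steps) == 0 {
		// No formatters enabled: fall back to the standard gofmt layout.
		if out, err := format.Source(src); err == nil {
			return out
		}
		return src
	}
	data := bytes.Clone(src)
	for _, s := range steps {
		if out, err := s(data); err == nil {
			data = out
		}
	}
	return data
}

func main() {
	out := chain([]byte("package main\nfunc  main(){}\n"))
	fmt.Print(string(out))
}
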
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go
index 653a2d5142..ccc58fee40 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go
@@ -9,12 +9,12 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/internal"
)
-func New(setting *config.AsasalintSettings) *goanalysis.Linter {
+func New(settings *config.AsasalintSettings) *goanalysis.Linter {
cfg := asasalint.LinterSetting{}
- if setting != nil {
- cfg.Exclude = setting.Exclude
- cfg.NoBuiltinExclusions = !setting.UseBuiltinExclusions
- cfg.IgnoreTest = setting.IgnoreTest
+ if settings != nil {
+ cfg.Exclude = settings.Exclude
+ cfg.NoBuiltinExclusions = !settings.UseBuiltinExclusions
+ cfg.IgnoreTest = settings.IgnoreTest
}
a, err := asasalint.NewAnalyzer(cfg)
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go
index 4ced901e8f..c6315965c4 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go
@@ -10,42 +10,42 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(cfg *config.BiDiChkSettings) *goanalysis.Linter {
+func New(settings *config.BiDiChkSettings) *goanalysis.Linter {
a := bidichk.NewAnalyzer()
- cfgMap := map[string]map[string]any{}
- if cfg != nil {
+ cfg := map[string]map[string]any{}
+ if settings != nil {
var opts []string
- if cfg.LeftToRightEmbedding {
+ if settings.LeftToRightEmbedding {
opts = append(opts, "LEFT-TO-RIGHT-EMBEDDING")
}
- if cfg.RightToLeftEmbedding {
+ if settings.RightToLeftEmbedding {
opts = append(opts, "RIGHT-TO-LEFT-EMBEDDING")
}
- if cfg.PopDirectionalFormatting {
+ if settings.PopDirectionalFormatting {
opts = append(opts, "POP-DIRECTIONAL-FORMATTING")
}
- if cfg.LeftToRightOverride {
+ if settings.LeftToRightOverride {
opts = append(opts, "LEFT-TO-RIGHT-OVERRIDE")
}
- if cfg.RightToLeftOverride {
+ if settings.RightToLeftOverride {
opts = append(opts, "RIGHT-TO-LEFT-OVERRIDE")
}
- if cfg.LeftToRightIsolate {
+ if settings.LeftToRightIsolate {
opts = append(opts, "LEFT-TO-RIGHT-ISOLATE")
}
- if cfg.RightToLeftIsolate {
+ if settings.RightToLeftIsolate {
opts = append(opts, "RIGHT-TO-LEFT-ISOLATE")
}
- if cfg.FirstStrongIsolate {
+ if settings.FirstStrongIsolate {
opts = append(opts, "FIRST-STRONG-ISOLATE")
}
- if cfg.PopDirectionalIsolate {
+ if settings.PopDirectionalIsolate {
opts = append(opts, "POP-DIRECTIONAL-ISOLATE")
}
- cfgMap[a.Name] = map[string]any{
+ cfg[a.Name] = map[string]any{
"disallowed-runes": strings.Join(opts, ","),
}
}
@@ -54,6 +54,6 @@ func New(cfg *config.BiDiChkSettings) *goanalysis.Linter {
a.Name,
"Checks for dangerous unicode character sequences",
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go
index f39814edc5..c520e88db3 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go
@@ -12,7 +12,7 @@ func New() *goanalysis.Linter {
return goanalysis.NewLinter(
a.Name,
- "checks whether HTTP response body is closed successfully",
+ a.Doc,
[]*analysis.Analyzer{a},
nil,
).WithLoadMode(goanalysis.LoadModeTypesInfo)
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go
index 13baba5a69..772b5601ca 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go
@@ -33,5 +33,5 @@ func New(settings *config.Cyclop) *goanalysis.Linter {
a.Doc,
[]*analysis.Analyzer{a},
cfg,
- ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go
index 49108f4f1f..afa8152fac 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go
@@ -1,41 +1,26 @@
package dogsled
import (
- "fmt"
"go/ast"
- "go/token"
- "sync"
"golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "dogsled"
func New(settings *config.DogsledSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues := runDogsled(pass, settings)
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
- return nil, nil
+ return run(pass, settings.MaxBlankIdentifiers)
},
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
}
return goanalysis.NewLinter(
@@ -43,68 +28,51 @@ func New(settings *config.DogsledSettings) *goanalysis.Linter {
"Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runDogsled(pass *analysis.Pass, settings *config.DogsledSettings) []goanalysis.Issue {
- var reports []goanalysis.Issue
- for _, f := range pass.Files {
- v := &returnsVisitor{
- maxBlanks: settings.MaxBlankIdentifiers,
- f: pass.Fset,
- }
-
- ast.Walk(v, f)
-
- for i := range v.issues {
- reports = append(reports, goanalysis.NewIssue(&v.issues[i], pass))
- }
- }
-
- return reports
-}
-
-type returnsVisitor struct {
- f *token.FileSet
- maxBlanks int
- issues []result.Issue
-}
-
-func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor {
- funcDecl, ok := node.(*ast.FuncDecl)
+func run(pass *analysis.Pass, maxBlanks int) (any, error) {
+ insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
if !ok {
- return v
+ return nil, nil
}
- if funcDecl.Body == nil {
- return v
+
+ nodeFilter := []ast.Node{
+ (*ast.FuncDecl)(nil),
}
- for _, expr := range funcDecl.Body.List {
- assgnStmt, ok := expr.(*ast.AssignStmt)
+ insp.Preorder(nodeFilter, func(node ast.Node) {
+ funcDecl, ok := node.(*ast.FuncDecl)
if !ok {
- continue
+ return
+ }
+
+ if funcDecl.Body == nil {
+ return
}
- numBlank := 0
- for _, left := range assgnStmt.Lhs {
- ident, ok := left.(*ast.Ident)
+ for _, expr := range funcDecl.Body.List {
+ assgnStmt, ok := expr.(*ast.AssignStmt)
if !ok {
continue
}
- if ident.Name == "_" {
- numBlank++
+
+ numBlank := 0
+ for _, left := range assgnStmt.Lhs {
+ ident, ok := left.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if ident.Name == "_" {
+ numBlank++
+ }
}
- }
- if numBlank > v.maxBlanks {
- v.issues = append(v.issues, result.Issue{
- FromLinter: linterName,
- Text: fmt.Sprintf("declaration has %v blank identifiers", numBlank),
- Pos: v.f.Position(assgnStmt.Pos()),
- })
+ if numBlank > maxBlanks {
+ pass.Reportf(assgnStmt.Pos(), "declaration has %v blank identifiers", numBlank)
+ }
}
- }
- return v
+ })
+
+ return nil, nil
}
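
The dogsled rewrite follows the standard go/analysis pattern: require inspect.Analyzer, visit only the node kinds of interest via Preorder instead of a hand-rolled ast.Visitor, and report through pass.Reportf. A minimal skeleton in the same shape (the actual check elided):

package skeleton

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/inspect"
	"golang.org/x/tools/go/ast/inspector"
)

var Analyzer = &analysis.Analyzer{
	Name:     "skeleton",
	Doc:      "minimal Preorder-based analyzer skeleton",
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run: func(pass *analysis.Pass) (any, error) {
		insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
		// Visit only function declarations; other nodes are skipped
		// without allocating a full ast.Walk visitor.
		insp.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node) {
			fn := n.(*ast.FuncDecl)
			if fn.Body == nil {
				return // declaration without a body (e.g. an assembly stub)
			}
			// ... inspect fn.Body and call pass.Reportf(...) as needed.
		})
		return nil, nil
	},
}
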
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go
index 7abcb4c4f4..d2bb3d8d84 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go
@@ -54,9 +54,7 @@ func New(settings *config.DuplSettings) *goanalysis.Linter {
}
func runDupl(pass *analysis.Pass, settings *config.DuplSettings) ([]goanalysis.Issue, error) {
- fileNames := internal.GetFileNames(pass)
-
- issues, err := duplAPI.Run(fileNames, settings.Threshold)
+ issues, err := duplAPI.Run(internal.GetGoFileNames(pass), settings.Threshold)
if err != nil {
return nil, err
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go
index bba4fc9e19..a2bcc34d40 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go
@@ -10,14 +10,14 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(setting *config.DupWordSettings) *goanalysis.Linter {
+func New(settings *config.DupWordSettings) *goanalysis.Linter {
a := dupword.NewAnalyzer()
- cfgMap := map[string]map[string]any{}
- if setting != nil {
- cfgMap[a.Name] = map[string]any{
- "keyword": strings.Join(setting.Keywords, ","),
- "ignore": strings.Join(setting.Ignore, ","),
+ cfg := map[string]map[string]any{}
+ if settings != nil {
+ cfg[a.Name] = map[string]any{
+ "keyword": strings.Join(settings.Keywords, ","),
+ "ignore": strings.Join(settings.Ignore, ","),
}
}
@@ -25,6 +25,6 @@ func New(setting *config.DupWordSettings) *goanalysis.Linter {
a.Name,
"checks for duplicate words in the source code",
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go
index 9a8a2aa876..67a1b2ca8d 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go
@@ -2,6 +2,7 @@ package errcheck
import (
"bufio"
+ "cmp"
"fmt"
"os"
"os/user"
@@ -90,10 +91,7 @@ func runErrCheck(lintCtx *linter.Context, pass *analysis.Pass, checker *errcheck
text := "Error return value is not checked"
if err.FuncName != "" {
- code := err.SelectorName
- if err.SelectorName == "" {
- code = err.FuncName
- }
+ code := cmp.Or(err.SelectorName, err.FuncName)
text = fmt.Sprintf("Error return value of %s is not checked", internal.FormatCode(code, lintCtx.Cfg))
}
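
cmp.Or, added in Go 1.22, returns the first of its arguments that is not the zero value, which is exactly the selector-name fallback the replaced if block implemented:

package main

import (
	"cmp"
	"fmt"
)

func main() {
	// Equivalent to: code := selectorName; if code == "" { code = funcName }
	fmt.Println(cmp.Or("", "io.Close"))         // io.Close
	fmt.Println(cmp.Or("resp.Body.Close", "x")) // resp.Body.Close
}
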
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go
index 8389a750c6..506113d6d5 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go
@@ -8,17 +8,17 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter {
+func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter {
a := errchkjson.NewAnalyzer()
- cfgMap := map[string]map[string]any{}
- cfgMap[a.Name] = map[string]any{
+ cfg := map[string]map[string]any{}
+ cfg[a.Name] = map[string]any{
"omit-safe": true,
}
- if cfg != nil {
- cfgMap[a.Name] = map[string]any{
- "omit-safe": !cfg.CheckErrorFreeEncoding,
- "report-no-exported": cfg.ReportNoExported,
+ if settings != nil {
+ cfg[a.Name] = map[string]any{
+ "omit-safe": !settings.CheckErrorFreeEncoding,
+ "report-no-exported": settings.ReportNoExported,
}
}
@@ -26,6 +26,6 @@ func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter {
a.Name,
a.Doc,
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go
index 86db8552d0..14851adc28 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go
@@ -8,16 +8,16 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(cfg *config.ErrorLintSettings) *goanalysis.Linter {
+func New(settings *config.ErrorLintSettings) *goanalysis.Linter {
var opts []errorlint.Option
- if cfg != nil {
- ae := toAllowPairs(cfg.AllowedErrors)
+ if settings != nil {
+ ae := toAllowPairs(settings.AllowedErrors)
if len(ae) > 0 {
opts = append(opts, errorlint.WithAllowedErrors(ae))
}
- aew := toAllowPairs(cfg.AllowedErrorsWildcard)
+ aew := toAllowPairs(settings.AllowedErrorsWildcard)
if len(aew) > 0 {
opts = append(opts, errorlint.WithAllowedWildcard(aew))
}
@@ -25,14 +25,14 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter {
a := errorlint.NewAnalyzer(opts...)
- cfgMap := map[string]map[string]any{}
+ cfg := map[string]map[string]any{}
- if cfg != nil {
- cfgMap[a.Name] = map[string]any{
- "errorf": cfg.Errorf,
- "errorf-multi": cfg.ErrorfMulti,
- "asserts": cfg.Asserts,
- "comparison": cfg.Comparison,
+ if settings != nil {
+ cfg[a.Name] = map[string]any{
+ "errorf": settings.Errorf,
+ "errorf-multi": settings.ErrorfMulti,
+ "asserts": settings.Asserts,
+ "comparison": settings.Comparison,
}
}
@@ -41,7 +41,7 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter {
"errorlint is a linter for that can be used to find code "+
"that will cause problems with the error wrapping scheme introduced in Go 1.13.",
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go
new file mode 100644
index 0000000000..2de8ea98c2
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go
@@ -0,0 +1,19 @@
+package exptostd
+
+import (
+ "github.com/ldez/exptostd"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/goanalysis"
+)
+
+func New() *goanalysis.Linter {
+ a := exptostd.NewAnalyzer()
+
+ return goanalysis.NewLinter(
+ a.Name,
+ a.Doc,
+ []*analysis.Analyzer{a},
+ nil,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go
index 3572b60c23..3b410359d0 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go
@@ -2,40 +2,27 @@ package forbidigo
import (
"fmt"
- "sync"
"github.com/ashanbrown/forbidigo/forbidigo"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/logutils"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "forbidigo"
func New(settings *config.ForbidigoSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runForbidigo(pass, settings)
+ err := runForbidigo(pass, settings)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
return nil, nil
},
}
@@ -48,12 +35,10 @@ func New(settings *config.ForbidigoSettings) *goanalysis.Linter {
"Forbids identifiers",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeTypesInfo)
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
-func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]goanalysis.Issue, error) {
+func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) error {
options := []forbidigo.Option{
forbidigo.OptionExcludeGodocExamples(settings.ExcludeGodocExamples),
// disable "//permit" directives so only "//nolint" directives matters within golangci-lint
@@ -66,38 +51,39 @@ func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]go
for _, pattern := range settings.Forbid {
buffer, err := pattern.MarshalString()
if err != nil {
- return nil, err
+ return err
}
+
patterns = append(patterns, string(buffer))
}
forbid, err := forbidigo.NewLinter(patterns, options...)
if err != nil {
- return nil, fmt.Errorf("failed to create linter %q: %w", linterName, err)
+ return fmt.Errorf("failed to create linter %q: %w", linterName, err)
}
- var issues []goanalysis.Issue
for _, file := range pass.Files {
runConfig := forbidigo.RunConfig{
Fset: pass.Fset,
DebugLog: logutils.Debug(logutils.DebugKeyForbidigo),
}
- if settings != nil && settings.AnalyzeTypes {
+
+ if settings.AnalyzeTypes {
runConfig.TypesInfo = pass.TypesInfo
}
+
hints, err := forbid.RunWithConfig(runConfig, file)
if err != nil {
- return nil, fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err)
+ return fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err)
}
for _, hint := range hints {
- issues = append(issues, goanalysis.NewIssue(&result.Issue{
- Pos: hint.Position(),
- Text: hint.Details(),
- FromLinter: linterName,
- }, pass))
+ pass.Report(analysis.Diagnostic{
+ Pos: hint.Pos(),
+ Message: hint.Details(),
+ })
}
}
- return issues, nil
+ return nil
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go
index e43339394d..bdadcece46 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go
@@ -1,75 +1,33 @@
package funlen
import (
- "go/token"
- "strings"
- "sync"
-
"github.com/ultraware/funlen"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
-const linterName = "funlen"
+type Config struct {
+ lineLimit int
+ stmtLimit int
+ ignoreComments bool
+}
func New(settings *config.FunlenSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
- analyzer := &analysis.Analyzer{
- Name: linterName,
- Doc: goanalysis.TheOnlyanalyzerDoc,
- Run: func(pass *analysis.Pass) (any, error) {
- issues := runFunlen(pass, settings)
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
- return nil, nil
- },
+ cfg := Config{}
+ if settings != nil {
+ cfg.lineLimit = settings.Lines
+ cfg.stmtLimit = settings.Statements
+ cfg.ignoreComments = !settings.IgnoreComments
}
+ a := funlen.NewAnalyzer(cfg.lineLimit, cfg.stmtLimit, cfg.ignoreComments)
+
return goanalysis.NewLinter(
- linterName,
- "Tool for detection of long functions",
- []*analysis.Analyzer{analyzer},
+ a.Name,
+ a.Doc,
+ []*analysis.Analyzer{a},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
-func runFunlen(pass *analysis.Pass, settings *config.FunlenSettings) []goanalysis.Issue {
- var lintIssues []funlen.Message
- for _, file := range pass.Files {
- fileIssues := funlen.Run(file, pass.Fset, settings.Lines, settings.Statements, settings.IgnoreComments)
- lintIssues = append(lintIssues, fileIssues...)
- }
-
- if len(lintIssues) == 0 {
- return nil
- }
-
- issues := make([]goanalysis.Issue, len(lintIssues))
- for k, i := range lintIssues {
- issues[k] = goanalysis.NewIssue(&result.Issue{
- Pos: token.Position{
- Filename: i.Pos.Filename,
- Line: i.Pos.Line,
- },
- Text: strings.TrimRight(i.Message, "\n"),
- FromLinter: linterName,
- }, pass)
- }
-
- return issues
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go
index a9afb6c893..841ee81b0d 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go
@@ -1,20 +1,15 @@
package gci
import (
+ "bytes"
"fmt"
- "sort"
- "strings"
- "sync"
+ "io"
+ "os"
gcicfg "github.com/daixiang0/gci/pkg/config"
"github.com/daixiang0/gci/pkg/gci"
- "github.com/daixiang0/gci/pkg/io"
"github.com/daixiang0/gci/pkg/log"
- "github.com/daixiang0/gci/pkg/section"
- "github.com/golangci/modinfo"
- "github.com/hexops/gotextdiff"
- "github.com/hexops/gotextdiff/myers"
- "github.com/hexops/gotextdiff/span"
+ "github.com/shazow/go-diff/difflib"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
@@ -25,226 +20,97 @@ import (
const linterName = "gci"
+type differ interface {
+ Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error
+}
+
func New(settings *config.GciSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
+ log.InitLogger()
+ _ = log.L().Sync()
- analyzer := &analysis.Analyzer{
+ diff := difflib.New()
+
+ a := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: goanalysis.DummyRun,
- Requires: []*analysis.Analyzer{
- modinfo.Analyzer,
- },
}
- var cfg *gcicfg.Config
- if settings != nil {
- rawCfg := gcicfg.YamlConfig{
- Cfg: gcicfg.BoolConfig{
- SkipGenerated: settings.SkipGenerated,
- CustomOrder: settings.CustomOrder,
- NoLexOrder: settings.NoLexOrder,
- },
- SectionStrings: settings.Sections,
- }
-
- if settings.LocalPrefixes != "" {
- prefix := []string{"standard", "default", fmt.Sprintf("prefix(%s)", settings.LocalPrefixes)}
- rawCfg.SectionStrings = prefix
- }
-
- var err error
- cfg, err = YamlConfig{origin: rawCfg}.Parse()
- if err != nil {
- internal.LinterLogger.Fatalf("gci: configuration parsing: %v", err)
- }
- }
-
- var lock sync.Mutex
-
return goanalysis.NewLinter(
linterName,
- "Gci controls Go package import order and makes it always deterministic.",
- []*analysis.Analyzer{analyzer},
+ "Checks if code and import statements are formatted, it makes import statements always deterministic.",
+ []*analysis.Analyzer{a},
nil,
).WithContextSetter(func(lintCtx *linter.Context) {
- analyzer.Run = func(pass *analysis.Pass) (any, error) {
- var err error
- cfg.Sections, err = hackSectionList(pass, cfg)
+ a.Run = func(pass *analysis.Pass) (any, error) {
+ err := run(lintCtx, pass, settings, diff)
if err != nil {
return nil, err
}
- issues, err := runGci(pass, lintCtx, cfg, &lock)
- if err != nil {
- return nil, err
- }
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
}
- }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
}).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runGci(pass *analysis.Pass, lintCtx *linter.Context, cfg *gcicfg.Config, lock *sync.Mutex) ([]goanalysis.Issue, error) {
- fileNames := internal.GetFileNames(pass)
+func run(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GciSettings, diff differ) error {
+ cfg := gcicfg.YamlConfig{
+ Cfg: gcicfg.BoolConfig{
+ NoInlineComments: settings.NoInlineComments,
+ NoPrefixComments: settings.NoPrefixComments,
+ SkipGenerated: settings.SkipGenerated,
+ CustomOrder: settings.CustomOrder,
+ NoLexOrder: settings.NoLexOrder,
+ },
+ SectionStrings: settings.Sections,
+ ModPath: pass.Module.Path,
+ }
- var diffs []string
- err := diffFormattedFilesToArray(fileNames, *cfg, &diffs, lock)
- if err != nil {
- return nil, err
+ if settings.LocalPrefixes != "" {
+ cfg.SectionStrings = []string{
+ "standard",
+ "default",
+ fmt.Sprintf("prefix(%s)", settings.LocalPrefixes),
+ }
}
- var issues []goanalysis.Issue
+ parsedCfg, err := cfg.Parse()
+ if err != nil {
+ return err
+ }
- for _, diff := range diffs {
- if diff == "" {
+ for _, file := range pass.Files {
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
continue
}
- is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGci)
+ input, err := os.ReadFile(position.Filename)
if err != nil {
- return nil, fmt.Errorf("can't extract issues from gci diff output %s: %w", diff, err)
- }
-
- for i := range is {
- issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+ return fmt.Errorf("unable to open file %s: %w", position.Filename, err)
}
- }
-
- return issues, nil
-}
-
-func getIssuedTextGci(settings *config.LintersSettings) string {
- text := "File is not `gci`-ed"
- hasOptions := settings.Gci.SkipGenerated || len(settings.Gci.Sections) > 0
- if !hasOptions {
- return text
- }
-
- text += " with"
-
- if settings.Gci.SkipGenerated {
- text += " --skip-generated"
- }
-
- if len(settings.Gci.Sections) > 0 {
- for _, sect := range settings.Gci.Sections {
- text += " -s " + sect
+ _, output, err := gci.LoadFormat(input, position.Filename, *parsedCfg)
+ if err != nil {
+ return fmt.Errorf("error while running gci: %w", err)
}
- }
- if settings.Gci.CustomOrder {
- text += " --custom-order"
- }
+ if !bytes.Equal(input, output) {
+ out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", position.Filename))
- return text
-}
-
-func hackSectionList(pass *analysis.Pass, cfg *gcicfg.Config) (section.SectionList, error) {
- var sections section.SectionList
-
- for _, sect := range cfg.Sections {
- // local module hack
- if v, ok := sect.(*section.LocalModule); ok {
- info, err := modinfo.FindModuleFromPass(pass)
+ err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output))
if err != nil {
- return nil, err
- }
-
- if info.Path == "" {
- continue
+ return fmt.Errorf("error while running gci: %w", err)
}
- v.Path = info.Path
- }
-
- sections = append(sections, sect)
- }
-
- return sections, nil
-}
-
-// diffFormattedFilesToArray is a copy of gci.DiffFormattedFilesToArray without io.StdInGenerator.
-// gci.DiffFormattedFilesToArray uses gci.processStdInAndGoFilesInPaths that uses io.StdInGenerator but stdin is not active on CI.
-// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L63-L75
-// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L80
-func diffFormattedFilesToArray(paths []string, cfg gcicfg.Config, diffs *[]string, lock *sync.Mutex) error {
- log.InitLogger()
- defer func() { _ = log.L().Sync() }()
-
- return gci.ProcessFiles(io.GoFilesInPathsGenerator(paths, true), cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error {
- fileURI := span.URIFromPath(filePath)
- edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile))
- unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits)
- lock.Lock()
- *diffs = append(*diffs, fmt.Sprint(unifiedEdits))
- lock.Unlock()
- return nil
- })
-}
-
-// Code below this comment is borrowed and modified from gci.
-// https://github.com/daixiang0/gci/blob/v0.13.5/pkg/config/config.go
-
-var defaultOrder = map[string]int{
- section.StandardType: 0,
- section.DefaultType: 1,
- section.CustomType: 2,
- section.BlankType: 3,
- section.DotType: 4,
- section.AliasType: 5,
- section.LocalModuleType: 6,
-}
-
-type YamlConfig struct {
- origin gcicfg.YamlConfig
-}
-
-//nolint:gocritic // code borrowed from gci and modified to fix LocalModule section behavior.
-func (g YamlConfig) Parse() (*gcicfg.Config, error) {
- var err error
-
- sections, err := section.Parse(g.origin.SectionStrings)
- if err != nil {
- return nil, err
- }
-
- if sections == nil {
- sections = section.DefaultSections()
- }
-
- // if default order sorted sections
- if !g.origin.Cfg.CustomOrder {
- sort.Slice(sections, func(i, j int) bool {
- sectionI, sectionJ := sections[i].Type(), sections[j].Type()
+ diff := out.String()
- if g.origin.Cfg.NoLexOrder || strings.Compare(sectionI, sectionJ) != 0 {
- return defaultOrder[sectionI] < defaultOrder[sectionJ]
+ err = internal.ExtractDiagnosticFromPatch(pass, file, diff, lintCtx)
+ if err != nil {
+ return fmt.Errorf("can't extract issues from gci diff output %q: %w", diff, err)
}
-
- return strings.Compare(sections[i].String(), sections[j].String()) < 0
- })
- }
-
- sectionSeparators, err := section.Parse(g.origin.SectionSeparatorStrings)
- if err != nil {
- return nil, err
- }
- if sectionSeparators == nil {
- sectionSeparators = section.DefaultSectionSeparators()
+ }
}
- return &gcicfg.Config{BoolConfig: g.origin.Cfg, Sections: sections, SectionSeparators: sectionSeparators}, nil
+ return nil
}
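
The gci rewrite drops the file-path generators and the gotextdiff dependency: each file is formatted in memory, and only when the bytes differ does the wrapper build a small unified diff that `ExtractDiagnosticFromPatch` turns into diagnostics. A sketch of that compare-and-diff step, with the hypothetical `formatSource` standing in for `gci.LoadFormat`:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/shazow/go-diff/difflib"
)

// formatSource is a placeholder for the real formatter (gci.LoadFormat above).
func formatSource(src []byte) []byte {
	return bytes.TrimLeft(src, "\n")
}

func main() {
	input, err := os.ReadFile("main.go")
	if err != nil {
		panic(err)
	}

	output := formatSource(input)
	if bytes.Equal(input, output) {
		return // already formatted: nothing to report
	}

	// Same "--- file / +++ file" header the wrapper writes before the hunks.
	out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", "main.go"))
	if err := difflib.New().Diff(out, bytes.NewReader(input), bytes.NewReader(output)); err != nil {
		panic(err)
	}

	fmt.Print(out.String())
}
```
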
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
index 9873c9ba49..6826b77b6b 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
@@ -14,18 +14,18 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter {
if settings != nil {
cfg = &types.Config{
- SuppressLen: types.Boolean(settings.SuppressLenAssertion),
- SuppressNil: types.Boolean(settings.SuppressNilAssertion),
- SuppressErr: types.Boolean(settings.SuppressErrAssertion),
- SuppressCompare: types.Boolean(settings.SuppressCompareAssertion),
- SuppressAsync: types.Boolean(settings.SuppressAsyncAssertion),
- ForbidFocus: types.Boolean(settings.ForbidFocusContainer),
- SuppressTypeCompare: types.Boolean(settings.SuppressTypeCompareWarning),
- AllowHaveLen0: types.Boolean(settings.AllowHaveLenZero),
- ForceExpectTo: types.Boolean(settings.ForceExpectTo),
- ValidateAsyncIntervals: types.Boolean(settings.ValidateAsyncIntervals),
- ForbidSpecPollution: types.Boolean(settings.ForbidSpecPollution),
- ForceSucceedForFuncs: types.Boolean(settings.ForceSucceedForFuncs),
+ SuppressLen: settings.SuppressLenAssertion,
+ SuppressNil: settings.SuppressNilAssertion,
+ SuppressErr: settings.SuppressErrAssertion,
+ SuppressCompare: settings.SuppressCompareAssertion,
+ SuppressAsync: settings.SuppressAsyncAssertion,
+ ForbidFocus: settings.ForbidFocusContainer,
+ SuppressTypeCompare: settings.SuppressTypeCompareWarning,
+ AllowHaveLen0: settings.AllowHaveLenZero,
+ ForceExpectTo: settings.ForceExpectTo,
+ ValidateAsyncIntervals: settings.ValidateAsyncIntervals,
+ ForbidSpecPollution: settings.ForbidSpecPollution,
+ ForceSucceedForFuncs: settings.ForceSucceedForFuncs,
}
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go
index 1345eb8c29..510a06c91d 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go
@@ -1,46 +1,24 @@
package gochecknoinits
import (
- "fmt"
"go/ast"
- "go/token"
- "sync"
"golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
"github.com/golangci/golangci-lint/pkg/goanalysis"
"github.com/golangci/golangci-lint/pkg/golinters/internal"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "gochecknoinits"
func New() *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
- Name: linterName,
- Doc: goanalysis.TheOnlyanalyzerDoc,
- Run: func(pass *analysis.Pass) (any, error) {
- var res []goanalysis.Issue
- for _, file := range pass.Files {
- fileIssues := checkFileForInits(file, pass.Fset)
- for i := range fileIssues {
- res = append(res, goanalysis.NewIssue(&fileIssues[i], pass))
- }
- }
- if len(res) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, res...)
- mu.Unlock()
-
- return nil, nil
- },
+ Name: linterName,
+ Doc: goanalysis.TheOnlyanalyzerDoc,
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
}
return goanalysis.NewLinter(
@@ -48,28 +26,30 @@ func New() *goanalysis.Linter {
"Checks that no init functions are present in Go code",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func checkFileForInits(f *ast.File, fset *token.FileSet) []result.Issue {
- var res []result.Issue
- for _, decl := range f.Decls {
+func run(pass *analysis.Pass) (any, error) {
+ insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if !ok {
+ return nil, nil
+ }
+
+ nodeFilter := []ast.Node{
+ (*ast.FuncDecl)(nil),
+ }
+
+ insp.Preorder(nodeFilter, func(decl ast.Node) {
funcDecl, ok := decl.(*ast.FuncDecl)
if !ok {
- continue
+ return
}
fnName := funcDecl.Name.Name
if fnName == "init" && funcDecl.Recv.NumFields() == 0 {
- res = append(res, result.Issue{
- Pos: fset.Position(funcDecl.Pos()),
- Text: fmt.Sprintf("don't use %s function", internal.FormatCode(fnName, nil)),
- FromLinter: linterName,
- })
+ pass.Reportf(funcDecl.Pos(), "don't use %s function", internal.FormatCode(fnName, nil))
}
- }
+ })
- return res
+ return nil, nil
}
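
The gochecknoinits rewrite swaps the manual walk over `pass.Files` for the shared `inspect.Analyzer` result, which pre-indexes the AST so the linter only visits function declarations. A generic sketch of that pattern (not the vendored code):

```go
package main

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/inspect"
	"golang.org/x/tools/go/analysis/singlechecker"
	"golang.org/x/tools/go/ast/inspector"
)

var noInits = &analysis.Analyzer{
	Name:     "noinits",
	Doc:      "reports init functions",
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run: func(pass *analysis.Pass) (any, error) {
		insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

		// Asking the inspector only for function declarations avoids a
		// full manual traversal of every file.
		insp.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node) {
			fn := n.(*ast.FuncDecl)
			if fn.Name.Name == "init" && fn.Recv.NumFields() == 0 {
				pass.Reportf(fn.Pos(), "don't use init functions")
			}
		})

		return nil, nil
	},
}

func main() { singlechecker.Main(noInits) }
```
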
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
index 7aab0efebb..cbc5873126 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
@@ -61,9 +61,13 @@ func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSetti
TypesInfo: pass.TypesInfo,
}
+ cfg := gochecksumtype.Config{
+ DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive,
+ IncludeSharedInterfaces: settings.IncludeSharedInterfaces,
+ }
+
var unknownError error
- errors := gochecksumtype.Run([]*packages.Package{pkg},
- gochecksumtype.Config{DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive})
+ errors := gochecksumtype.Run([]*packages.Package{pkg}, cfg)
for _, err := range errors {
err, ok := err.(gochecksumtype.Error)
if !ok {
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
index 194ea35355..087ddc1df0 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
@@ -5,7 +5,6 @@ import (
"fmt"
"go/ast"
"go/types"
- "path/filepath"
"reflect"
"runtime"
"slices"
@@ -23,7 +22,6 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/logutils"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "gocritic"
@@ -34,9 +32,6 @@ var (
)
func New(settings *config.GoCriticSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
wrapper := &goCriticWrapper{
sizes: types.SizesFor("gc", runtime.GOARCH),
}
@@ -45,19 +40,11 @@ func New(settings *config.GoCriticSettings) *goanalysis.Linter {
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := wrapper.run(pass)
+ err := wrapper.run(pass)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
},
}
@@ -75,9 +62,6 @@ Dynamic rules are written declaratively with AST patterns, filters, report messa
wrapper.init(context.Log, settings)
}).
- WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).
WithLoadMode(goanalysis.LoadModeTypesInfo)
}
@@ -111,9 +95,9 @@ func (w *goCriticWrapper) init(logger logutils.Log, settings *config.GoCriticSet
w.settingsWrapper = settingsWrapper
}
-func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) {
+func (w *goCriticWrapper) run(pass *analysis.Pass) error {
if w.settingsWrapper == nil {
- return nil, errors.New("the settings wrapper is nil")
+ return errors.New("the settings wrapper is nil")
}
linterCtx := gocriticlinter.NewContext(pass.Fset, w.sizes)
@@ -122,19 +106,14 @@ func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) {
enabledCheckers, err := w.buildEnabledCheckers(linterCtx)
if err != nil {
- return nil, err
+ return err
}
linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg)
- pkgIssues := runOnPackage(linterCtx, enabledCheckers, pass.Files)
+ runOnPackage(pass, enabledCheckers, pass.Files)
- issues := make([]goanalysis.Issue, 0, len(pkgIssues))
- for i := range pkgIssues {
- issues = append(issues, goanalysis.NewIssue(&pkgIssues[i], pass))
- }
-
- return issues, nil
+ return nil
}
func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) {
@@ -214,47 +193,36 @@ func (w *goCriticWrapper) normalizeCheckerParamsValue(p any) any {
}
}
-func runOnPackage(linterCtx *gocriticlinter.Context, checks []*gocriticlinter.Checker, files []*ast.File) []result.Issue {
- var res []result.Issue
+func runOnPackage(pass *analysis.Pass, checks []*gocriticlinter.Checker, files []*ast.File) {
for _, f := range files {
- filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename)
- linterCtx.SetFileInfo(filename, f)
-
- issues := runOnFile(linterCtx, f, checks)
- res = append(res, issues...)
+ runOnFile(pass, f, checks)
}
- return res
}
-func runOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriticlinter.Checker) []result.Issue {
- var res []result.Issue
-
+func runOnFile(pass *analysis.Pass, f *ast.File, checks []*gocriticlinter.Checker) {
for _, c := range checks {
// All checkers are expected to use *lint.Context
// as read-only structure, so no copying is required.
for _, warn := range c.Check(f) {
- pos := linterCtx.FileSet.Position(warn.Pos)
- issue := result.Issue{
- Pos: pos,
- Text: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text),
- FromLinter: linterName,
+ diag := analysis.Diagnostic{
+ Pos: warn.Pos,
+ Category: c.Info.Name,
+ Message: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text),
}
if warn.HasQuickFix() {
- issue.Replacement = &result.Replacement{
- Inline: &result.InlineFix{
- StartCol: pos.Column - 1,
- Length: int(warn.Suggestion.To - warn.Suggestion.From),
- NewString: string(warn.Suggestion.Replacement),
- },
- }
+ diag.SuggestedFixes = []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: warn.Suggestion.From,
+ End: warn.Suggestion.To,
+ NewText: warn.Suggestion.Replacement,
+ }},
+ }}
}
- res = append(res, issue)
+ pass.Report(diag)
}
}
-
- return res
}
type goCriticChecks[T any] map[string]T
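
The interesting part of the gocritic change is the quick-fix translation: a warning's byte range and replacement map directly onto `analysis.TextEdit`, replacing the old column-and-length arithmetic against a `token.Position`. A sketch with a stand-in warning struct (gocritic's real type differs):

```go
package main

import (
	"go/token"

	"golang.org/x/tools/go/analysis"
)

// warning is a hypothetical stand-in for gocritic's warning type.
type warning struct {
	Pos         token.Pos
	From, To    token.Pos
	Replacement []byte
	Text        string
}

func toDiagnostic(w warning, checker string) analysis.Diagnostic {
	diag := analysis.Diagnostic{
		Pos:      w.Pos,
		Category: checker,
		Message:  checker + ": " + w.Text,
	}

	if len(w.Replacement) > 0 {
		// token.Pos-based edits carry the fix natively; no column or
		// length arithmetic against a token.Position is needed.
		diag.SuggestedFixes = []analysis.SuggestedFix{{
			TextEdits: []analysis.TextEdit{{
				Pos:     w.From,
				End:     w.To,
				NewText: w.Replacement,
			}},
		}}
	}

	return diag
}

func main() {
	_ = toDiagnostic(warning{Text: "example"}, "demoChecker")
}
```
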
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
index fc51b5bb8c..3194b3d3ac 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
@@ -1,23 +1,18 @@
package godot
import (
- "sync"
+ "cmp"
"github.com/tetafro/godot"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "godot"
func New(settings *config.GodotSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
var dotSettings godot.Settings
if settings != nil {
@@ -29,32 +24,22 @@ func New(settings *config.GodotSettings) *goanalysis.Linter {
}
// Convert deprecated setting
- if settings.CheckAll {
+ if settings.CheckAll != nil && *settings.CheckAll {
dotSettings.Scope = godot.AllScope
}
}
- if dotSettings.Scope == "" {
- dotSettings.Scope = godot.DeclScope
- }
+ dotSettings.Scope = cmp.Or(dotSettings.Scope, godot.DeclScope)
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runGodot(pass, dotSettings)
+ err := runGodot(pass, dotSettings)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
},
}
@@ -64,38 +49,40 @@ func New(settings *config.GodotSettings) *goanalysis.Linter {
"Check if comments end in a period",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runGodot(pass *analysis.Pass, settings godot.Settings) ([]goanalysis.Issue, error) {
- var lintIssues []godot.Issue
+func runGodot(pass *analysis.Pass, settings godot.Settings) error {
for _, file := range pass.Files {
iss, err := godot.Run(file, pass.Fset, settings)
if err != nil {
- return nil, err
+ return err
}
- lintIssues = append(lintIssues, iss...)
- }
-
- if len(lintIssues) == 0 {
- return nil, nil
- }
- issues := make([]goanalysis.Issue, len(lintIssues))
- for k, i := range lintIssues {
- issue := result.Issue{
- Pos: i.Pos,
- Text: i.Message,
- FromLinter: linterName,
- Replacement: &result.Replacement{
- NewLines: []string{i.Replacement},
- },
+ if len(iss) == 0 {
+ continue
}
- issues[k] = goanalysis.NewIssue(&issue, pass)
+ f := pass.Fset.File(file.Pos())
+
+ for _, i := range iss {
+ start := f.Pos(i.Pos.Offset)
+ end := goanalysis.EndOfLinePos(f, i.Pos.Line)
+
+ pass.Report(analysis.Diagnostic{
+ Pos: start,
+ End: end,
+ Message: i.Message,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: start,
+ End: end,
+ NewText: []byte(i.Replacement),
+ }},
+ }},
+ })
+ }
}
- return issues, nil
+ return nil
}
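
godot reports issues as byte offsets, so the new wrapper converts them back into `token.Pos` values through the file's `token.File` and extends each edit to the end of the offending line. A stdlib sketch, under the assumption that `endOfLinePos` below roughly matches the `goanalysis.EndOfLinePos` helper:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

// endOfLinePos approximates goanalysis.EndOfLinePos: the position just
// before the next line starts, or end of file for the last line.
func endOfLinePos(f *token.File, line int) token.Pos {
	if line >= f.LineCount() {
		return f.Pos(f.Size())
	}
	return f.LineStart(line+1) - 1
}

func main() {
	fset := token.NewFileSet()
	src := "package main\n\n// a comment without a period\nfunc main() {}\n"

	astFile, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	f := fset.File(astFile.Pos())

	// Suppose a checker flagged byte offset 14 (the comment) on line 3.
	start := f.Pos(14)
	end := endOfLinePos(f, 3)

	fmt.Println(fset.Position(start), fset.Position(end))
}
```
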
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
index d8de026baf..181e0a73ab 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
@@ -3,36 +3,22 @@ package godox
import (
"go/token"
"strings"
- "sync"
"github.com/matoous/godox"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "godox"
func New(settings *config.GodoxSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues := runGodox(pass, settings)
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
+ runGodox(pass, settings)
return nil, nil
},
@@ -43,33 +29,30 @@ func New(settings *config.GodoxSettings) *goanalysis.Linter {
"Tool for detection of FIXME, TODO and other comment keywords",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) []goanalysis.Issue {
- var messages []godox.Message
+func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) {
for _, file := range pass.Files {
- messages = append(messages, godox.Run(file, pass.Fset, settings.Keywords...)...)
- }
-
- if len(messages) == 0 {
- return nil
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ continue
+ }
+
+ messages := godox.Run(file, pass.Fset, settings.Keywords...)
+ if len(messages) == 0 {
+ continue
+ }
+
+ nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false)
+
+ ft := pass.Fset.File(file.Pos())
+
+ for _, i := range messages {
+ pass.Report(analysis.Diagnostic{
+ Pos: ft.LineStart(goanalysis.AdjustPos(i.Pos.Line, nonAdjPosition.Line, position.Line)) + token.Pos(i.Pos.Column),
+ Message: strings.TrimRight(i.Message, "\n"),
+ })
+ }
}
-
- issues := make([]goanalysis.Issue, len(messages))
-
- for k, i := range messages {
- issues[k] = goanalysis.NewIssue(&result.Issue{
- Pos: token.Position{
- Filename: i.Pos.Filename,
- Line: i.Pos.Line,
- },
- Text: strings.TrimRight(i.Message, "\n"),
- FromLinter: linterName,
- }, pass)
- }
-
- return issues
}
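
The subtlety in the godox hunk is the pair of `PositionFor` calls: with `adjusted=false` the position ignores `//line` directives, and the difference between the two line numbers is exactly the correction the wrapper applies (via `goanalysis.AdjustPos`) before calling `ft.LineStart`. A stdlib-only demonstration:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()

	// The //line directive makes following positions report as gen.go:100
	// even though the text physically sits on line 3 of demo.go.
	src := "package main\n//line gen.go:100\nfunc main() {}\n"

	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	pos := file.End() // a position after the directive

	adjusted := fset.PositionFor(pos, true)  // honours //line
	physical := fset.PositionFor(pos, false) // raw file coordinates

	fmt.Printf("adjusted=%v physical=%v delta=%d\n",
		adjusted, physical, physical.Line-adjusted.Line)
}
```
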
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
index 289ceab8ae..b6531d5314 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
@@ -2,7 +2,6 @@ package gofmt
import (
"fmt"
- "sync"
gofmtAPI "github.com/golangci/gofmt/gofmt"
"golang.org/x/tools/go/analysis"
@@ -16,9 +15,6 @@ import (
const linterName = "gofmt"
func New(settings *config.GoFmtSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
@@ -27,72 +23,46 @@ func New(settings *config.GoFmtSettings) *goanalysis.Linter {
return goanalysis.NewLinter(
linterName,
- "Gofmt checks whether code was gofmt-ed. By default "+
- "this tool runs with -s option to check for code simplification",
+ "Checks if the code is formatted according to 'gofmt' command.",
[]*analysis.Analyzer{analyzer},
nil,
).WithContextSetter(func(lintCtx *linter.Context) {
analyzer.Run = func(pass *analysis.Pass) (any, error) {
- issues, err := runGofmt(lintCtx, pass, settings)
+ err := runGofmt(lintCtx, pass, settings)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
}
- }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
}).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) ([]goanalysis.Issue, error) {
- fileNames := internal.GetFileNames(pass)
-
+func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) error {
var rewriteRules []gofmtAPI.RewriteRule
for _, rule := range settings.RewriteRules {
rewriteRules = append(rewriteRules, gofmtAPI.RewriteRule(rule))
}
- var issues []goanalysis.Issue
+ for _, file := range pass.Files {
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ continue
+ }
- for _, f := range fileNames {
- diff, err := gofmtAPI.RunRewrite(f, settings.Simplify, rewriteRules)
+ diff, err := gofmtAPI.RunRewrite(position.Filename, settings.Simplify, rewriteRules)
if err != nil { // TODO: skip
- return nil, err
+ return err
}
if diff == nil {
continue
}
- is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoFmt)
+ err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx)
if err != nil {
- return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err)
+ return fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err)
}
-
- for i := range is {
- issues = append(issues, goanalysis.NewIssue(&is[i], pass))
- }
- }
-
- return issues, nil
-}
-
-func getIssuedTextGoFmt(settings *config.LintersSettings) string {
- text := "File is not `gofmt`-ed"
- if settings.Gofmt.Simplify {
- text += " with `-s`"
- }
- for _, rule := range settings.Gofmt.RewriteRules {
- text += fmt.Sprintf(" `-r '%s -> %s'`", rule.Pattern, rule.Replacement)
}
- return text
+ return nil
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go
index 3bb7df12e2..7a11a9074d 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go
@@ -6,7 +6,6 @@ import (
"io"
"os"
"strings"
- "sync"
"github.com/shazow/go-diff/difflib"
"golang.org/x/tools/go/analysis"
@@ -25,9 +24,6 @@ type differ interface {
}
func New(settings *config.GofumptSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
diff := difflib.New()
var options format.Options
@@ -48,68 +44,56 @@ func New(settings *config.GofumptSettings) *goanalysis.Linter {
return goanalysis.NewLinter(
linterName,
- "Gofumpt checks whether code was gofumpt-ed.",
+ "Checks if code and import statements are formatted, with additional rules.",
[]*analysis.Analyzer{analyzer},
nil,
).WithContextSetter(func(lintCtx *linter.Context) {
analyzer.Run = func(pass *analysis.Pass) (any, error) {
- issues, err := runGofumpt(lintCtx, pass, diff, options)
+ err := runGofumpt(lintCtx, pass, diff, options)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
}
- }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
}).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) ([]goanalysis.Issue, error) {
- fileNames := internal.GetFileNames(pass)
-
- var issues []goanalysis.Issue
+func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) error {
+ for _, file := range pass.Files {
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ continue
+ }
- for _, f := range fileNames {
- input, err := os.ReadFile(f)
+ input, err := os.ReadFile(position.Filename)
if err != nil {
- return nil, fmt.Errorf("unable to open file %s: %w", f, err)
+ return fmt.Errorf("unable to open file %s: %w", position.Filename, err)
}
output, err := format.Source(input, options)
if err != nil {
- return nil, fmt.Errorf("error while running gofumpt: %w", err)
+ return fmt.Errorf("error while running gofumpt: %w", err)
}
if !bytes.Equal(input, output) {
- out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f))
+ out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", position.Filename))
err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output))
if err != nil {
- return nil, fmt.Errorf("error while running gofumpt: %w", err)
+ return fmt.Errorf("error while running gofumpt: %w", err)
}
diff := out.String()
- is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGoFumpt)
- if err != nil {
- return nil, fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err)
- }
- for i := range is {
- issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+ err = internal.ExtractDiagnosticFromPatch(pass, file, diff, lintCtx)
+ if err != nil {
+ return fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err)
}
}
}
- return issues, nil
+ return nil
}
func getLangVersion(settings *config.GofumptSettings) string {
@@ -120,13 +104,3 @@ func getLangVersion(settings *config.GofumptSettings) string {
return "go" + strings.TrimPrefix(settings.LangVersion, "go")
}
-
-func getIssuedTextGoFumpt(settings *config.LintersSettings) string {
- text := "File is not `gofumpt`-ed"
-
- if settings.Gofumpt.ExtraRules {
- text += " with `-extra`"
- }
-
- return text
-}
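
The gofumpt loop now mirrors gci's: read each Go file, format it in memory, and diff only on change. A hypothetical direct use of the same `format.Source` call; the option values here are illustrative assumptions, not settings from this repository:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"mvdan.cc/gofumpt/format"
)

func main() {
	input, err := os.ReadFile("main.go")
	if err != nil {
		panic(err)
	}

	output, err := format.Source(input, format.Options{
		LangVersion: "go1.22", // assumed; the wrapper derives this from settings
		ExtraRules:  true,
	})
	if err != nil {
		panic(err)
	}

	if !bytes.Equal(input, output) {
		fmt.Println("main.go is not gofumpt-ed")
	}
}
```
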
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
index c6b1aae6be..5043381143 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
@@ -2,23 +2,18 @@ package goheader
import (
"go/token"
- "sync"
+ "strings"
goheader "github.com/denis-tingaikin/go-header"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "goheader"
func New(settings *config.GoHeaderSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
conf := &goheader.Configuration{}
if settings != nil {
conf = &goheader.Configuration{
@@ -32,19 +27,11 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter {
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runGoHeader(pass, conf)
+ err := runGoHeader(pass, conf)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
},
}
@@ -54,62 +41,91 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter {
"Checks if file header matches to pattern",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysis.Issue, error) {
+func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) error {
if conf.TemplatePath == "" && conf.Template == "" {
// User did not pass template, so then do not run go-header linter
- return nil, nil
+ return nil
}
template, err := conf.GetTemplate()
if err != nil {
- return nil, err
+ return err
}
values, err := conf.GetValues()
if err != nil {
- return nil, err
+ return err
}
a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values))
- var issues []goanalysis.Issue
for _, file := range pass.Files {
- path := pass.Fset.Position(file.Pos()).Filename
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ continue
+ }
+
+ issue := a.Analyze(&goheader.Target{File: file, Path: position.Filename})
+ if issue == nil {
+ continue
+ }
- i := a.Analyze(&goheader.Target{File: file, Path: path})
+ f := pass.Fset.File(file.Pos())
- if i == nil {
+ commentLine := 1
+ var offset int
+
+ // Inspired by https://github.com/denis-tingaikin/go-header/blob/4c75a6a2332f025705325d6c71fff4616aedf48f/analyzer.go#L85-L92
+ if len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package {
+ if !strings.HasPrefix(file.Comments[0].List[0].Text, "/*") {
+ // When the comment is "//" there is a one character offset.
+ offset = 1
+ }
+ commentLine = goanalysis.GetFilePositionFor(pass.Fset, file.Comments[0].Pos()).Line
+ }
+
+ // Skip issues related to build directives.
+ // https://github.com/denis-tingaikin/go-header/issues/18
+ if issue.Location().Position-offset < 0 {
continue
}
- issue := result.Issue{
- Pos: token.Position{
- Line: i.Location().Line + 1,
- Column: i.Location().Position,
- Filename: path,
- },
- Text: i.Message(),
- FromLinter: linterName,
+ diag := analysis.Diagnostic{
+ Pos: f.LineStart(issue.Location().Line+1) + token.Pos(issue.Location().Position-offset), // The position of the first divergence.
+ Message: issue.Message(),
}
- if fix := i.Fix(); fix != nil {
- issue.LineRange = &result.Range{
- From: issue.Line(),
- To: issue.Line() + len(fix.Actual) - 1,
+ if fix := issue.Fix(); fix != nil {
+ current := len(fix.Actual)
+ for _, s := range fix.Actual {
+ current += len(s)
}
- issue.Replacement = &result.Replacement{
- NeedOnlyDelete: len(fix.Expected) == 0,
- NewLines: fix.Expected,
+
+ start := f.LineStart(commentLine)
+
+ end := start + token.Pos(current)
+
+ header := strings.Join(fix.Expected, "\n") + "\n"
+
+ // Adds an extra line between the package and the header.
+ if end == file.Package {
+ header += "\n"
}
+
+ diag.SuggestedFixes = []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: start,
+ End: end,
+ NewText: []byte(header),
+ }},
+ }}
}
- issues = append(issues, goanalysis.NewIssue(&issue, pass))
+ pass.Report(diag)
}
- return issues, nil
+ return nil
}
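
The goheader fix construction has to know how many bytes the existing header occupies, which it derives from `fix.Actual` as line lengths plus one newline per line. A small sketch of that accounting:

```go
package main

import (
	"fmt"
	"strings"
)

// headerSpanLength mirrors the loop above (current := len(fix.Actual);
// then current += len(s) per line): one newline per line plus line bytes.
func headerSpanLength(actual []string) int {
	n := len(actual)
	for _, s := range actual {
		n += len(s)
	}
	return n
}

func main() {
	actual := []string{"// Copyright 2020", "// Old Corp."}
	expected := []string{"// Copyright 2025", "// New Corp."}

	fmt.Println("bytes to replace:", headerSpanLength(actual))
	fmt.Println("replacement:\n" + strings.Join(expected, "\n") + "\n")
}
```
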
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go
index de965d5c85..6ddc9a75b1 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go
@@ -2,7 +2,6 @@ package goimports
import (
"fmt"
- "sync"
goimportsAPI "github.com/golangci/gofmt/goimports"
"golang.org/x/tools/go/analysis"
@@ -17,9 +16,6 @@ import (
const linterName = "goimports"
func New(settings *config.GoImportsSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
@@ -28,67 +24,43 @@ func New(settings *config.GoImportsSettings) *goanalysis.Linter {
return goanalysis.NewLinter(
linterName,
- "Check import statements are formatted according to the 'goimport' command. "+
- "Reformat imports in autofix mode.",
+ "Checks if the code and import statements are formatted according to the 'goimports' command.",
[]*analysis.Analyzer{analyzer},
nil,
).WithContextSetter(func(lintCtx *linter.Context) {
imports.LocalPrefix = settings.LocalPrefixes
analyzer.Run = func(pass *analysis.Pass) (any, error) {
- issues, err := runGoImports(lintCtx, pass)
+ err := runGoImports(lintCtx, pass)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
}
- }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
}).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) {
- fileNames := internal.GetFileNames(pass)
-
- var issues []goanalysis.Issue
+func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) error {
+ for _, file := range pass.Files {
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ continue
+ }
- for _, f := range fileNames {
- diff, err := goimportsAPI.Run(f)
+ diff, err := goimportsAPI.Run(position.Filename)
if err != nil { // TODO: skip
- return nil, err
+ return err
}
if diff == nil {
continue
}
- is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoImports)
+ err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx)
if err != nil {
- return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err)
+ return fmt.Errorf("can't extract issues from goimports diff output %q: %w", string(diff), err)
}
-
- for i := range is {
- issues = append(issues, goanalysis.NewIssue(&is[i], pass))
- }
- }
-
- return issues, nil
-}
-
-func getIssuedTextGoImports(settings *config.LintersSettings) string {
- text := "File is not `goimports`-ed"
-
- if settings.Goimports.LocalPrefixes != "" {
- text += " with -local " + settings.Goimports.LocalPrefixes
}
- return text
+ return nil
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go
index 9cde7e26c6..f8f47ba2b4 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go
@@ -1,6 +1,7 @@
package gomoddirectives
import (
+ "regexp"
"sync"
"github.com/ldez/gomoddirectives"
@@ -8,6 +9,7 @@ import (
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
+ "github.com/golangci/golangci-lint/pkg/golinters/internal"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/result"
)
@@ -24,6 +26,27 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter {
opts.ReplaceAllowList = settings.ReplaceAllowList
opts.RetractAllowNoExplanation = settings.RetractAllowNoExplanation
opts.ExcludeForbidden = settings.ExcludeForbidden
+ opts.ToolchainForbidden = settings.ToolchainForbidden
+ opts.ToolForbidden = settings.ToolForbidden
+ opts.GoDebugForbidden = settings.GoDebugForbidden
+
+ if settings.ToolchainPattern != "" {
+ exp, err := regexp.Compile(settings.ToolchainPattern)
+ if err != nil {
+ internal.LinterLogger.Fatalf("%s: invalid toolchain pattern: %v", linterName, err)
+ } else {
+ opts.ToolchainPattern = exp
+ }
+ }
+
+ if settings.GoVersionPattern != "" {
+ exp, err := regexp.Compile(settings.GoVersionPattern)
+ if err != nil {
+ internal.LinterLogger.Fatalf("%s: invalid Go version pattern: %v", linterName, err)
+ } else {
+ opts.GoVersionPattern = exp
+ }
+ }
}
analyzer := &analysis.Analyzer{
@@ -40,7 +63,7 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter {
).WithContextSetter(func(lintCtx *linter.Context) {
analyzer.Run = func(pass *analysis.Pass) (any, error) {
once.Do(func() {
- results, err := gomoddirectives.Analyze(opts)
+ results, err := gomoddirectives.AnalyzePass(pass, opts)
if err != nil {
lintCtx.Log.Warnf("running %s failed: %s: "+
"if you are not using go modules it is suggested to disable this linter", linterName, err)
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go
index 8f1036b0f1..8bddebc162 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go
@@ -73,7 +73,7 @@ func New(settings *config.GoModGuardSettings) *goanalysis.Linter {
}
analyzer.Run = func(pass *analysis.Pass) (any, error) {
- gomodguardIssues := processor.ProcessFiles(internal.GetFileNames(pass))
+ gomodguardIssues := processor.ProcessFiles(internal.GetGoFileNames(pass))
mu.Lock()
defer mu.Unlock()
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go
index a5367399b8..6b46beaccf 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go
@@ -184,7 +184,15 @@ func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) {
}
for k, v := range globalOptionMap {
- conf.SetGlobal(gosec.GlobalOption(k), fmt.Sprintf("%v", v))
+ option := gosec.GlobalOption(k)
+
+ // Set nosec global option only if the value is true
+ // https://github.com/securego/gosec/blob/v2.21.4/analyzer.go#L572
+ if option == gosec.Nosec && v == false {
+ continue
+ }
+
+ conf.SetGlobal(option, fmt.Sprintf("%v", v))
}
}
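
The nosec special case matters because, per the gosec source linked in the comment, handing the option a stringified value can count as setting it; skipping a false value keeps `nosec` truly off. A reduced sketch of the loop:

```go
package main

import "fmt"

func setGlobals(globals map[string]any, set func(k, v string)) {
	for k, v := range globals {
		// A stringified "false" could still be treated as set upstream,
		// so a false nosec is skipped rather than forwarded.
		if k == "nosec" && v == false {
			continue
		}
		set(k, fmt.Sprintf("%v", v))
	}
}

func main() {
	setGlobals(map[string]any{"nosec": false, "audit": true}, func(k, v string) {
		fmt.Printf("global %s=%s\n", k, v)
	})
}
```
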
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go
index 4f6fb80358..bf9b19f129 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go
@@ -10,16 +10,16 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(s *config.GosmopolitanSettings) *goanalysis.Linter {
+func New(settings *config.GosmopolitanSettings) *goanalysis.Linter {
a := gosmopolitan.NewAnalyzer()
- cfgMap := map[string]map[string]any{}
- if s != nil {
- cfgMap[a.Name] = map[string]any{
- "allowtimelocal": s.AllowTimeLocal,
- "escapehatches": strings.Join(s.EscapeHatches, ","),
- "lookattests": !s.IgnoreTests,
- "watchforscripts": strings.Join(s.WatchForScripts, ","),
+ cfg := map[string]map[string]any{}
+ if settings != nil {
+ cfg[a.Name] = map[string]any{
+ "allowtimelocal": settings.AllowTimeLocal,
+ "escapehatches": strings.Join(settings.EscapeHatches, ","),
+ "lookattests": !settings.IgnoreTests,
+ "watchforscripts": strings.Join(settings.WatchForScripts, ","),
}
}
@@ -27,6 +27,6 @@ func New(s *config.GosmopolitanSettings) *goanalysis.Linter {
a.Name,
a.Doc,
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go
index eb63a5d334..73bf63af73 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go
@@ -40,6 +40,7 @@ import (
"golang.org/x/tools/go/analysis/passes/slog"
"golang.org/x/tools/go/analysis/passes/sortslice"
"golang.org/x/tools/go/analysis/passes/stdmethods"
+ "golang.org/x/tools/go/analysis/passes/stdversion"
"golang.org/x/tools/go/analysis/passes/stringintconv"
"golang.org/x/tools/go/analysis/passes/structtag"
"golang.org/x/tools/go/analysis/passes/testinggoroutine"
@@ -50,6 +51,7 @@ import (
"golang.org/x/tools/go/analysis/passes/unsafeptr"
"golang.org/x/tools/go/analysis/passes/unusedresult"
"golang.org/x/tools/go/analysis/passes/unusedwrite"
+ "golang.org/x/tools/go/analysis/passes/waitgroup"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
@@ -89,6 +91,7 @@ var (
slog.Analyzer,
sortslice.Analyzer,
stdmethods.Analyzer,
+ stdversion.Analyzer,
stringintconv.Analyzer,
structtag.Analyzer,
testinggoroutine.Analyzer,
@@ -99,9 +102,10 @@ var (
unsafeptr.Analyzer,
unusedresult.Analyzer,
unusedwrite.Analyzer,
+ waitgroup.Analyzer,
}
- // https://github.com/golang/go/blob/b56645a87b28840a180d64077877cb46570b4176/src/cmd/vet/main.go#L49-L81
+ // https://github.com/golang/go/blob/go1.23.0/src/cmd/vet/main.go#L55-L87
defaultAnalyzers = []*analysis.Analyzer{
appends.Analyzer,
asmdecl.Analyzer,
@@ -126,6 +130,7 @@ var (
sigchanyzer.Analyzer,
slog.Analyzer,
stdmethods.Analyzer,
+ stdversion.Analyzer,
stringintconv.Analyzer,
structtag.Analyzer,
testinggoroutine.Analyzer,
@@ -185,7 +190,7 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers
}
// Keeping for backward compatibility.
- if cfg.CheckShadowing && name == shadow.Analyzer.Name {
+ if cfg.CheckShadowing != nil && *cfg.CheckShadowing && name == shadow.Analyzer.Name {
return true
}
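
`check-shadowing` moved from `bool` to `*bool` so the config layer can tell "explicitly false" apart from "never set", and the deprecated branch now fires only on a real opt-in. A minimal sketch of the pattern:

```go
package main

import "fmt"

type settings struct {
	CheckShadowing *bool // nil means the user never set the option
}

func shadowEnabled(cfg settings) bool {
	return cfg.CheckShadowing != nil && *cfg.CheckShadowing
}

func main() {
	yes := true
	fmt.Println(shadowEnabled(settings{}))                     // false: unset
	fmt.Println(shadowEnabled(settings{CheckShadowing: &yes})) // true: opted in
}
```
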
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go
index aa6ce1cebb..e0a3f794a7 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go
@@ -11,9 +11,9 @@ import (
func New(settings *config.GrouperSettings) *goanalysis.Linter {
a := grouper.New()
- linterCfg := map[string]map[string]any{}
+ cfg := map[string]map[string]any{}
if settings != nil {
- linterCfg[a.Name] = map[string]any{
+ cfg[a.Name] = map[string]any{
"const-require-single-const": settings.ConstRequireSingleConst,
"const-require-grouping": settings.ConstRequireGrouping,
"import-require-single-import": settings.ImportRequireSingleImport,
@@ -29,6 +29,6 @@ func New(settings *config.GrouperSettings) *goanalysis.Linter {
a.Name,
"Analyze expression groups.",
[]*analysis.Analyzer{a},
- linterCfg,
+ cfg,
).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go
index 45117c9a48..b7c6c35aea 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go
@@ -51,8 +51,11 @@ func New(settings *config.ImportAsSettings) *goanalysis.Linter {
uniqPackages[a.Pkg] = a
}
- // skip the duplication check when the alias is a regular expression replacement pattern (ie. contains `$`).
- if v, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") {
+ // Skips the duplication check when:
+ // - the alias is empty.
+ // - the alias is a regular expression replacement pattern (ie. contains `$`).
+ v, ok := uniqAliases[a.Alias]
+ if ok && a.Alias != "" && !strings.Contains(a.Alias, "$") {
lintCtx.Log.Errorf("invalid configuration, multiple packages with the same alias: alias=%s packages=[%s,%s]", a.Alias, a.Pkg, v.Pkg)
} else {
uniqAliases[a.Alias] = a
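
The loosened importas duplication check now tolerates two legitimate repeats: empty aliases and regex replacement patterns containing `$`. A self-contained sketch of that rule:

```go
package main

import (
	"fmt"
	"strings"
)

type aliasRule struct{ Pkg, Alias string }

func findCollisions(rules []aliasRule) []string {
	seen := map[string]aliasRule{}
	var collisions []string

	for _, r := range rules {
		prev, ok := seen[r.Alias]
		// Only plain, non-empty aliases can collide; `$` marks a regex
		// replacement pattern and "" marks no alias at all.
		if ok && r.Alias != "" && !strings.Contains(r.Alias, "$") {
			collisions = append(collisions,
				fmt.Sprintf("alias=%s packages=[%s,%s]", r.Alias, r.Pkg, prev.Pkg))
			continue
		}
		seen[r.Alias] = r
	}
	return collisions
}

func main() {
	fmt.Println(findCollisions([]aliasRule{
		{Pkg: "k8s.io/api/core/v1", Alias: "corev1"},
		{Pkg: "k8s.io/api/apps/v1", Alias: "corev1"}, // real collision
		{Pkg: "example.com/$1", Alias: "$1"},         // regex pattern, allowed
		{Pkg: "example.com/$2", Alias: "$1"},
	}))
}
```
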
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go
index f919c5b2a8..8e5e1f0e73 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go
@@ -3,20 +3,22 @@ package internal
import (
"bytes"
"fmt"
+ "go/ast"
"go/token"
+ "slices"
"strings"
diffpkg "github.com/sourcegraph/go-diff/diff"
+ "golang.org/x/tools/go/analysis"
- "github.com/golangci/golangci-lint/pkg/config"
+ "github.com/golangci/golangci-lint/pkg/goanalysis"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/logutils"
- "github.com/golangci/golangci-lint/pkg/result"
)
type Change struct {
- LineRange result.Range
- Replacement result.Replacement
+ From, To int
+ NewLines []string
}
type diffLineType string
@@ -27,8 +29,6 @@ const (
diffLineDeleted diffLineType = "deleted"
)
-type fmtTextFormatter func(settings *config.LintersSettings) string
-
type diffLine struct {
originalNumber int // 1-based original line number
typ diffLineType
@@ -44,58 +44,48 @@ type hunkChangesParser struct {
log logutils.Log
- lines []diffLine
-
- ret []Change
+ changes []Change
}
-func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) {
- lines := bytes.Split(h.Body, []byte{'\n'})
- currentOriginalLineNumber := int(h.OrigStartLine)
- var ret []diffLine
+func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change {
+ lines := parseDiffLines(h)
- for i, line := range lines {
- dl := diffLine{
- originalNumber: currentOriginalLineNumber,
+ for i := 0; i < len(lines); {
+ line := lines[i]
+
+ if line.typ == diffLineOriginal {
+ p.handleOriginalLine(lines, line, &i)
+ continue
}
- lineStr := string(line)
+ var deletedLines []diffLine
+ for ; i < len(lines) && lines[i].typ == diffLineDeleted; i++ {
+ deletedLines = append(deletedLines, lines[i])
+ }
- if strings.HasPrefix(lineStr, "-") {
- dl.typ = diffLineDeleted
- dl.data = strings.TrimPrefix(lineStr, "-")
- currentOriginalLineNumber++
- } else if strings.HasPrefix(lineStr, "+") {
- dl.typ = diffLineAdded
- dl.data = strings.TrimPrefix(lineStr, "+")
- } else {
- if i == len(lines)-1 && lineStr == "" {
- // handle last \n: don't add an empty original line
- break
- }
+ var addedLines []string
+ for ; i < len(lines) && lines[i].typ == diffLineAdded; i++ {
+ addedLines = append(addedLines, lines[i].data)
+ }
- dl.typ = diffLineOriginal
- dl.data = strings.TrimPrefix(lineStr, " ")
- currentOriginalLineNumber++
+ if len(deletedLines) != 0 {
+ p.handleDeletedLines(deletedLines, addedLines)
+ continue
}
- ret = append(ret, dl)
+ // no deletions, only additions
+ p.handleAddedOnlyLines(addedLines)
}
- // if > 0, then the original file had a 'No newline at end of file' mark
- if h.OrigNoNewlineAt > 0 {
- dl := diffLine{
- originalNumber: currentOriginalLineNumber + 1,
- typ: diffLineAdded,
- data: "",
- }
- ret = append(ret, dl)
+ if len(p.replacementLinesToPrepend) != 0 {
+ p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", lines)
+ return nil
}
- p.lines = ret
+ return p.changes
}
-func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) {
+func (p *hunkChangesParser) handleOriginalLine(lines []diffLine, line diffLine, i *int) {
if len(p.replacementLinesToPrepend) == 0 {
p.lastOriginalLine = &line
*i++
@@ -109,51 +99,39 @@ func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) {
*i++
var followingAddedLines []string
- for ; *i < len(p.lines) && p.lines[*i].typ == diffLineAdded; *i++ {
- followingAddedLines = append(followingAddedLines, p.lines[*i].data)
+ for ; *i < len(lines) && lines[*i].typ == diffLineAdded; *i++ {
+ followingAddedLines = append(followingAddedLines, lines[*i].data)
+ }
+
+ change := Change{
+ From: line.originalNumber,
+ To: line.originalNumber,
+ NewLines: slices.Concat(p.replacementLinesToPrepend, []string{line.data}, followingAddedLines),
}
+ p.changes = append(p.changes, change)
- p.ret = append(p.ret, Change{
- LineRange: result.Range{
- From: line.originalNumber,
- To: line.originalNumber,
- },
- Replacement: result.Replacement{
- NewLines: append(p.replacementLinesToPrepend, append([]string{line.data}, followingAddedLines...)...),
- },
- })
p.replacementLinesToPrepend = nil
p.lastOriginalLine = &line
}
func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLines []string) {
change := Change{
- LineRange: result.Range{
- From: deletedLines[0].originalNumber,
- To: deletedLines[len(deletedLines)-1].originalNumber,
- },
+ From: deletedLines[0].originalNumber,
+ To: deletedLines[len(deletedLines)-1].originalNumber,
}
- if len(addedLines) != 0 {
- change.Replacement.NewLines = append([]string{}, p.replacementLinesToPrepend...)
- change.Replacement.NewLines = append(change.Replacement.NewLines, addedLines...)
- if len(p.replacementLinesToPrepend) != 0 {
- p.replacementLinesToPrepend = nil
- }
-
- p.ret = append(p.ret, change)
- return
- }
+ switch {
+ case len(addedLines) != 0:
+ change.NewLines = slices.Concat(p.replacementLinesToPrepend, addedLines)
+ p.replacementLinesToPrepend = nil
- // delete-only change with possible prepending
- if len(p.replacementLinesToPrepend) != 0 {
- change.Replacement.NewLines = p.replacementLinesToPrepend
+ case len(p.replacementLinesToPrepend) != 0:
+ // delete-only change with possible prepending
+ change.NewLines = slices.Clone(p.replacementLinesToPrepend)
p.replacementLinesToPrepend = nil
- } else {
- change.Replacement.NeedOnlyDelete = true
}
- p.ret = append(p.ret, change)
+ p.changes = append(p.changes, change)
}
func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) {
@@ -166,70 +144,92 @@ func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) {
// 2. ...
p.replacementLinesToPrepend = addedLines
+
return
}
// add-only change merged into the last original line with possible prepending
- p.ret = append(p.ret, Change{
- LineRange: result.Range{
- From: p.lastOriginalLine.originalNumber,
- To: p.lastOriginalLine.originalNumber,
- },
- Replacement: result.Replacement{
- NewLines: append(p.replacementLinesToPrepend, append([]string{p.lastOriginalLine.data}, addedLines...)...),
- },
- })
+ change := Change{
+ From: p.lastOriginalLine.originalNumber,
+ To: p.lastOriginalLine.originalNumber,
+ NewLines: slices.Concat(p.replacementLinesToPrepend, []string{p.lastOriginalLine.data}, addedLines),
+ }
+
+ p.changes = append(p.changes, change)
+
p.replacementLinesToPrepend = nil
}
-func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change {
- p.parseDiffLines(h)
+func parseDiffLines(h *diffpkg.Hunk) []diffLine {
+ lines := bytes.Split(h.Body, []byte{'\n'})
- for i := 0; i < len(p.lines); {
- line := p.lines[i]
- if line.typ == diffLineOriginal {
- p.handleOriginalLine(line, &i)
- continue
- }
+ currentOriginalLineNumber := int(h.OrigStartLine)
- var deletedLines []diffLine
- for ; i < len(p.lines) && p.lines[i].typ == diffLineDeleted; i++ {
- deletedLines = append(deletedLines, p.lines[i])
+ var diffLines []diffLine
+
+ for i, line := range lines {
+ dl := diffLine{
+ originalNumber: currentOriginalLineNumber,
}
- var addedLines []string
- for ; i < len(p.lines) && p.lines[i].typ == diffLineAdded; i++ {
- addedLines = append(addedLines, p.lines[i].data)
+ if i == len(lines)-1 && len(line) == 0 {
+ // handle last \n: don't add an empty original line
+ break
}
- if len(deletedLines) != 0 {
- p.handleDeletedLines(deletedLines, addedLines)
- continue
+ lineStr := string(line)
+
+ switch {
+ case strings.HasPrefix(lineStr, "-"):
+ dl.typ = diffLineDeleted
+ dl.data = strings.TrimPrefix(lineStr, "-")
+ currentOriginalLineNumber++
+
+ case strings.HasPrefix(lineStr, "+"):
+ dl.typ = diffLineAdded
+ dl.data = strings.TrimPrefix(lineStr, "+")
+
+ default:
+ dl.typ = diffLineOriginal
+ dl.data = strings.TrimPrefix(lineStr, " ")
+ currentOriginalLineNumber++
}
- // no deletions, only additions
- p.handleAddedOnlyLines(addedLines)
+ diffLines = append(diffLines, dl)
}
- if len(p.replacementLinesToPrepend) != 0 {
- p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", p.lines)
- return nil
+ // if > 0, then the original file had a 'No newline at end of file' mark
+ if h.OrigNoNewlineAt > 0 {
+ dl := diffLine{
+ originalNumber: currentOriginalLineNumber + 1,
+ typ: diffLineAdded,
+ data: "",
+ }
+ diffLines = append(diffLines, dl)
}
- return p.ret
+ return diffLines
}
-func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName string, formatter fmtTextFormatter) ([]result.Issue, error) {
+func ExtractDiagnosticFromPatch(
+ pass *analysis.Pass,
+ file *ast.File,
+ patch string,
+ lintCtx *linter.Context,
+) error {
diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch))
if err != nil {
- return nil, fmt.Errorf("can't parse patch: %w", err)
+ return fmt.Errorf("can't parse patch: %w", err)
}
if len(diffs) == 0 {
- return nil, fmt.Errorf("got no diffs from patch parser: %v", patch)
+ return fmt.Errorf("got no diffs from patch parser: %v", patch)
}
- var issues []result.Issue
+ ft := pass.Fset.File(file.Pos())
+
+ adjLine := pass.Fset.PositionFor(file.Pos(), false).Line - pass.Fset.PositionFor(file.Pos(), true).Line
+
for _, d := range diffs {
if len(d.Hunks) == 0 {
lintCtx.Log.Warnf("Got no hunks in diff %+v", d)
@@ -242,23 +242,34 @@ func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName st
changes := p.parse(hunk)
for _, change := range changes {
- i := result.Issue{
- FromLinter: linterName,
- Pos: token.Position{
- Filename: d.NewName,
- Line: change.LineRange.From,
- },
- Text: formatter(lintCtx.Settings()),
- Replacement: &change.Replacement,
- }
- if change.LineRange.From != change.LineRange.To {
- i.LineRange = &change.LineRange
- }
-
- issues = append(issues, i)
+ pass.Report(toDiagnostic(ft, change, adjLine))
}
}
}
- return issues, nil
+ return nil
+}
+
+func toDiagnostic(ft *token.File, change Change, adjLine int) analysis.Diagnostic {
+ from := change.From + adjLine
+ if from > ft.LineCount() {
+ from = ft.LineCount()
+ }
+
+ start := ft.LineStart(from)
+
+ end := goanalysis.EndOfLinePos(ft, change.To+adjLine)
+
+ return analysis.Diagnostic{
+ Pos: start,
+ End: end,
+ Message: "File is not properly formatted",
+ SuggestedFixes: []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: start,
+ End: end,
+ NewText: []byte(strings.Join(change.NewLines, "\n")),
+ }},
+ }},
+ }
}
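
For orientation, the rewritten extractor above maps each `Change` onto an `analysis.Diagnostic` whose suggested fix spans whole lines. A minimal stand-alone sketch of that position arithmetic, using only the public `go/token` and `go/analysis` APIs (the source text here is illustrative, not from the patch):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/analysis"
)

func main() {
	fset := token.NewFileSet()
	src := "package main\n\nfunc main()   {\n}\n"
	f, _ := parser.ParseFile(fset, "example.go", src, 0)

	ft := fset.File(f.Pos())

	// Replace line 3 in its entirety: Pos is the start of the line,
	// End is the position of its trailing '\n'.
	start := ft.LineStart(3)
	end := ft.LineStart(4) - 1

	diag := analysis.Diagnostic{
		Pos:     start,
		End:     end,
		Message: "File is not properly formatted",
		SuggestedFixes: []analysis.SuggestedFix{{
			TextEdits: []analysis.TextEdit{{
				Pos:     start,
				End:     end,
				NewText: []byte("func main() {"),
			}},
		}},
	}

	fmt.Println(fset.Position(diag.Pos), fset.Position(diag.End)) // example.go:3:1 example.go:3:16
}
```
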
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go
index 80b194dd26..7525f2f2c5 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go
@@ -2,12 +2,12 @@ package internal
import (
"fmt"
- "path/filepath"
"strings"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
+ "github.com/golangci/golangci-lint/pkg/goanalysis"
)
func FormatCode(code string, _ *config.Config) string {
@@ -18,16 +18,17 @@ func FormatCode(code string, _ *config.Config) string {
return fmt.Sprintf("`%s`", code)
}
-func GetFileNames(pass *analysis.Pass) []string {
- var fileNames []string
+func GetGoFileNames(pass *analysis.Pass) []string {
+ var filenames []string
+
for _, f := range pass.Files {
- fileName := pass.Fset.PositionFor(f.Pos(), true).Filename
- ext := filepath.Ext(fileName)
- if ext != "" && ext != ".go" {
- // position has been adjusted to a non-go file, revert to original file
- fileName = pass.Fset.PositionFor(f.Pos(), false).Filename
+ position, b := goanalysis.GetGoFilePosition(pass, f)
+ if !b {
+ continue
}
- fileNames = append(fileNames, fileName)
+
+ filenames = append(filenames, position.Filename)
}
- return fileNames
+
+ return filenames
}
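
`GetGoFilePosition` itself is not part of this diff; the mechanism behind it is the adjusted/unadjusted position distinction in `go/token`. A stdlib-only sketch of why the fallback exists: a `//line` directive (the kind cgo and code generators emit) remaps positions to another file, and `PositionFor(pos, false)` recovers the on-disk location:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	// The //line directive remaps the following line to foo.c, the way
	// cgo-generated sources do. Filenames here are made up.
	src := "package main\n//line foo.c:10\nfunc main() {}\n"

	fset := token.NewFileSet()
	f, _ := parser.ParseFile(fset, "gen.go", src, 0)

	pos := f.End() // a position after the //line directive

	fmt.Println(fset.PositionFor(pos, true))  // adjusted: somewhere in foo.c
	fmt.Println(fset.PositionFor(pos, false)) // unadjusted: the real gen.go position
}
```
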
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go
index 67f89eecbd..bad3b0c4e2 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go
@@ -4,19 +4,15 @@ import (
"bufio"
"errors"
"fmt"
- "go/token"
+ "go/ast"
"os"
"strings"
- "sync"
"unicode/utf8"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/golinters/internal"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "lll"
@@ -24,26 +20,15 @@ const linterName = "lll"
const goCommentDirectivePrefix = "//go:"
func New(settings *config.LllSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runLll(pass, settings)
+ err := runLll(pass, settings)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
},
}
@@ -53,40 +38,39 @@ func New(settings *config.LllSettings) *goanalysis.Linter {
"Reports long lines",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runLll(pass *analysis.Pass, settings *config.LllSettings) ([]goanalysis.Issue, error) {
- fileNames := internal.GetFileNames(pass)
-
+func runLll(pass *analysis.Pass, settings *config.LllSettings) error {
spaces := strings.Repeat(" ", settings.TabWidth)
- var issues []goanalysis.Issue
- for _, f := range fileNames {
- lintIssues, err := getLLLIssuesForFile(f, settings.LineLength, spaces)
+ for _, file := range pass.Files {
+ err := getLLLIssuesForFile(pass, file, settings.LineLength, spaces)
if err != nil {
- return nil, err
- }
-
- for i := range lintIssues {
- issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass))
+ return err
}
}
- return issues, nil
+ return nil
}
-func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) {
- var res []result.Issue
+func getLLLIssuesForFile(pass *analysis.Pass, file *ast.File, maxLineLen int, tabSpaces string) error {
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ return nil
+ }
+
+ nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false)
- f, err := os.Open(filename)
+ f, err := os.Open(position.Filename)
if err != nil {
- return nil, fmt.Errorf("can't open file %s: %w", filename, err)
+ return fmt.Errorf("can't open file %s: %w", position.Filename, err)
}
+
defer f.Close()
+ ft := pass.Fset.File(file.Pos())
+
lineNumber := 0
multiImportEnabled := false
@@ -116,42 +100,34 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r
lineLen := utf8.RuneCountInString(line)
if lineLen > maxLineLen {
- res = append(res, result.Issue{
- Pos: token.Position{
- Filename: filename,
- Line: lineNumber,
- },
- Text: fmt.Sprintf("the line is %d characters long, which exceeds the maximum of %d characters.", lineLen, maxLineLen),
- FromLinter: linterName,
+ pass.Report(analysis.Diagnostic{
+ Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)),
+ Message: fmt.Sprintf("The line is %d characters long, which exceeds the maximum of %d characters.",
+ lineLen, maxLineLen),
})
}
}
if err := scanner.Err(); err != nil {
+ // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize.
+ // When the specified maxLineLen is smaller than bufio.MaxScanTokenSize,
+ // we can report this line as a long line instead of returning an error.
+ // The reason for this change is that this case can happen with autogenerated files:
+ // the go-bindata tool, for instance, might generate a file with a very long line.
+ // In that case, as it's an autogenerated file, the warning reported by lll will
+ // be ignored.
+ // But if we returned a linter error here and that error happened for an autogenerated
+ // file, the error would be discarded (fine), but all the subsequent lll errors for
+ // other files would be discarded too, and we'd miss legitimate errors.
if errors.Is(err, bufio.ErrTooLong) && maxLineLen < bufio.MaxScanTokenSize {
- // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize
- // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize
- // we can return this line as a long line instead of returning an error.
- // The reason for this change is that this case might happen with autogenerated files
- // The go-bindata tool for instance might generate a file with a very long line.
- // In this case, as it's an auto generated file, the warning returned by lll will
- // be ignored.
- // But if we return a linter error here, and this error happens for an autogenerated
- // file the error will be discarded (fine), but all the subsequent errors for lll will
- // be discarded for other files, and we'll miss legit error.
- res = append(res, result.Issue{
- Pos: token.Position{
- Filename: filename,
- Line: lineNumber,
- Column: 1,
- },
- Text: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize),
- FromLinter: linterName,
+ pass.Report(analysis.Diagnostic{
+ Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)),
+ Message: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize),
})
} else {
- return nil, fmt.Errorf("can't scan file %s: %w", filename, err)
+ return fmt.Errorf("can't scan file %s: %w", position.Filename, err)
}
}
- return res, nil
+ return nil
}
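
The scanning loop that feeds these reports is straightforward; a stand-alone sketch of the check it performs (tab width, limit, and input are illustrative):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
	"unicode/utf8"
)

func main() {
	const maxLineLen = 20
	tabSpaces := strings.Repeat(" ", 4) // settings.TabWidth in the real linter

	input := "short line\n\ta rather long indented line\n"

	scanner := bufio.NewScanner(strings.NewReader(input))
	lineNumber := 0
	for scanner.Scan() {
		lineNumber++
		// Expand tabs before counting runes, as the linter does.
		line := strings.ReplaceAll(scanner.Text(), "\t", tabSpaces)
		if n := utf8.RuneCountInString(line); n > maxLineLen {
			fmt.Printf("line %d is %d characters long (max %d)\n", lineNumber, n, maxLineLen)
		}
	}
}
```
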
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go
index 077e8a512f..84c8d73635 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go
@@ -22,6 +22,9 @@ func New(settings *config.LoggerCheckSettings) *goanalysis.Linter {
if !settings.Logr {
disable = append(disable, "logr")
}
+ if !settings.Slog {
+ disable = append(disable, "slog")
+ }
if !settings.Zap {
disable = append(disable, "zap")
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go
index 08f12369e6..799c51c874 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go
@@ -8,16 +8,16 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(cfg *config.MaintIdxSettings) *goanalysis.Linter {
+func New(settings *config.MaintIdxSettings) *goanalysis.Linter {
analyzer := maintidx.Analyzer
- cfgMap := map[string]map[string]any{
+ cfg := map[string]map[string]any{
analyzer.Name: {"under": 20},
}
- if cfg != nil {
- cfgMap[analyzer.Name] = map[string]any{
- "under": cfg.Under,
+ if settings != nil {
+ cfg[analyzer.Name] = map[string]any{
+ "under": settings.Under,
}
}
@@ -25,6 +25,6 @@ func New(cfg *config.MaintIdxSettings) *goanalysis.Linter {
analyzer.Name,
analyzer.Doc,
[]*analysis.Analyzer{analyzer},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go
index ae4bf21842..b5ab4515e5 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go
@@ -2,40 +2,26 @@ package makezero
import (
"fmt"
- "sync"
"github.com/ashanbrown/makezero/makezero"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "makezero"
func New(settings *config.MakezeroSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runMakeZero(pass, settings)
+ err := runMakeZero(pass, settings)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
},
}
@@ -45,30 +31,25 @@ func New(settings *config.MakezeroSettings) *goanalysis.Linter {
"Finds slice declarations with non-zero initial length",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeTypesInfo)
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
-func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) ([]goanalysis.Issue, error) {
+func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) error {
zero := makezero.NewLinter(settings.Always)
- var issues []goanalysis.Issue
-
for _, file := range pass.Files {
hints, err := zero.Run(pass.Fset, pass.TypesInfo, file)
if err != nil {
- return nil, fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err)
+ return fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err)
}
for _, hint := range hints {
- issues = append(issues, goanalysis.NewIssue(&result.Issue{
- Pos: hint.Position(),
- Text: hint.Details(),
- FromLinter: linterName,
- }, pass))
+ pass.Report(analysis.Diagnostic{
+ Pos: hint.Pos(),
+ Message: hint.Details(),
+ })
}
}
- return issues, nil
+ return nil
}
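
The switch from `hint.Position()` to `hint.Pos()` reflects the two position types in `go/token`: `pass.Report` wants a compact `token.Pos`, while the old `result.Issue` carried an expanded `token.Position`. A small illustration of the difference:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	f, _ := parser.ParseFile(fset, "m.go", "package main\nfunc main() {}\n", 0)

	pos := f.Name.Pos()            // token.Pos: an opaque offset into the FileSet
	position := fset.Position(pos) // token.Position: filename/line/column

	fmt.Println(int(pos), position) // 9 m.go:1:9
}
```
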
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go
index 34b880b529..e15dfa3a5a 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go
@@ -1,70 +1,30 @@
package mirror
import (
- "sync"
-
"github.com/butuzov/mirror"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
func New() *goanalysis.Linter {
- var (
- mu sync.Mutex
- issues []goanalysis.Issue
- )
-
a := mirror.NewAnalyzer()
- a.Run = func(pass *analysis.Pass) (any, error) {
- // mirror only lints test files if the `--with-tests` flag is passed,
- // so we pass the `with-tests` flag as true to the analyzer before running it.
- // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files`
- // or can be disabled per linter via exclude rules.
- // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262)
- violations := mirror.Run(pass, true)
-
- if len(violations) == 0 {
- return nil, nil
- }
-
- for index := range violations {
- i := violations[index].Issue(pass.Fset)
- issue := result.Issue{
- FromLinter: a.Name,
- Text: i.Message,
- Pos: i.Start,
- }
-
- if i.InlineFix != "" {
- issue.Replacement = &result.Replacement{
- Inline: &result.InlineFix{
- StartCol: i.Start.Column - 1,
- Length: len(i.Original),
- NewString: i.InlineFix,
- },
- }
- }
-
- mu.Lock()
- issues = append(issues, goanalysis.NewIssue(&issue, pass))
- mu.Unlock()
- }
-
- return nil, nil
+ // mirror only lints test files if the `--with-tests` flag is passed,
+ // so we pass the `with-tests` flag as true to the analyzer before running it.
+ // This can be turned off with the regular golangci-lint flags such as `--tests` or `--skip-files`,
+ // or disabled per linter via exclude rules.
+ // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262)
+ linterCfg := map[string]map[string]any{
+ a.Name: {
+ "with-tests": true,
+ },
}
- analyzer := goanalysis.NewLinter(
+ return goanalysis.NewLinter(
a.Name,
a.Doc,
[]*analysis.Analyzer{a},
- nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return issues
- }).WithLoadMode(goanalysis.LoadModeTypesInfo)
-
- return analyzer
+ linterCfg,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
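
Passing `with-tests` through the linter config map relies on the goanalysis framework setting values on the analyzer's flag set; presumably it does roughly the following (the analyzer name and flag here are illustrative):

```go
package main

import (
	"flag"
	"fmt"

	"golang.org/x/tools/go/analysis"
)

func main() {
	a := &analysis.Analyzer{Name: "demo", Doc: "demo analyzer"}
	a.Flags.Init(a.Name, flag.ContinueOnError)
	withTests := a.Flags.Bool("with-tests", false, "also lint test files")

	// The cfg map entry {"demo": {"with-tests": true}} is assumed to be
	// applied by the framework as a flag assignment like this:
	_ = a.Flags.Set("with-tests", "true")

	fmt.Println(*withTests) // true
}
```
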
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go
index 44409cec9d..3ace5fddb9 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go
@@ -2,9 +2,9 @@ package misspell
import (
"fmt"
+ "go/ast"
"go/token"
"strings"
- "sync"
"unicode"
"github.com/golangci/misspell"
@@ -12,17 +12,12 @@ import (
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/golinters/internal"
"github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "misspell"
func New(settings *config.MisspellSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
@@ -42,42 +37,25 @@ func New(settings *config.MisspellSettings) *goanalysis.Linter {
return nil, ruleErr
}
- issues, err := runMisspell(lintCtx, pass, replacer, settings.Mode)
+ err := runMisspell(lintCtx, pass, replacer, settings.Mode)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
}
- }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
}).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) ([]goanalysis.Issue, error) {
- fileNames := internal.GetFileNames(pass)
-
- var issues []goanalysis.Issue
- for _, filename := range fileNames {
- lintIssues, err := runMisspellOnFile(lintCtx, filename, replacer, mode)
+func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) error {
+ for _, file := range pass.Files {
+ err := runMisspellOnFile(lintCtx, pass, file, replacer, mode)
if err != nil {
- return nil, err
- }
-
- for i := range lintIssues {
- issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass))
+ return err
}
}
- return issues, nil
+ return nil
}
func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replacer, error) {
@@ -112,10 +90,15 @@ func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replac
return replacer, nil
}
-func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *misspell.Replacer, mode string) ([]result.Issue, error) {
- fileContent, err := lintCtx.FileCache.GetFileBytes(filename)
+func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.File, replacer *misspell.Replacer, mode string) error {
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ return nil
+ }
+
+ fileContent, err := lintCtx.FileCache.GetFileBytes(position.Filename)
if err != nil {
- return nil, fmt.Errorf("can't get file %s contents: %w", filename, err)
+ return fmt.Errorf("can't get file %s contents: %w", position.Filename, err)
}
// `r.ReplaceGo` doesn't find issues inside strings: it searches only inside comments.
@@ -129,36 +112,31 @@ func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *missp
replace = replacer.Replace
}
- _, diffs := replace(string(fileContent))
+ f := pass.Fset.File(file.Pos())
- var res []result.Issue
+ _, diffs := replace(string(fileContent))
for _, diff := range diffs {
text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected)
- pos := token.Position{
- Filename: filename,
- Line: diff.Line,
- Column: diff.Column + 1,
- }
-
- replacement := &result.Replacement{
- Inline: &result.InlineFix{
- StartCol: diff.Column,
- Length: len(diff.Original),
- NewString: diff.Corrected,
- },
- }
-
- res = append(res, result.Issue{
- Pos: pos,
- Text: text,
- FromLinter: linterName,
- Replacement: replacement,
+ start := f.LineStart(diff.Line) + token.Pos(diff.Column)
+ end := f.LineStart(diff.Line) + token.Pos(diff.Column+len(diff.Original))
+
+ pass.Report(analysis.Diagnostic{
+ Pos: start,
+ End: end,
+ Message: text,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: start,
+ End: end,
+ NewText: []byte(diff.Corrected),
+ }},
+ }},
})
}
- return res, nil
+ return nil
}
func appendExtraWords(replacer *misspell.Replacer, extraWords []config.MisspellExtraWords) error {
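
The `start`/`end` arithmetic above turns misspell's per-line byte column into a `token.Pos` by offsetting from the line start (misspell's `diff.Column` appears to be 0-based, given that the removed code reported `diff.Column + 1`). A stdlib-only sketch of the same arithmetic on made-up input:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	src := "package main\n// teh typo\n"
	fset := token.NewFileSet()
	astF, _ := parser.ParseFile(fset, "w.go", src, parser.ParseComments)

	f := fset.File(astF.Pos())

	line, col, orig := 2, 3, "teh" // 0-based byte column of "teh" on line 2
	start := f.LineStart(line) + token.Pos(col)
	end := start + token.Pos(len(orig))

	fmt.Println(f.Position(start), f.Position(end)) // w.go:2:4 w.go:2:7
}
```
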
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go
index 30047abfc2..a4e9ceff28 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go
@@ -8,11 +8,11 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(setting *config.MustTagSettings) *goanalysis.Linter {
+func New(settings *config.MustTagSettings) *goanalysis.Linter {
var funcs []musttag.Func
- if setting != nil {
- for _, fn := range setting.Functions {
+ if settings != nil {
+ for _, fn := range settings.Functions {
funcs = append(funcs, musttag.Func{
Name: fn.Name,
Tag: fn.Tag,
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go
index 43be973b0a..b72538fd16 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go
@@ -1,37 +1,21 @@
package nestif
import (
- "sort"
- "sync"
-
"github.com/nakabonne/nestif"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "nestif"
func New(settings *config.NestifSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
- Name: goanalysis.TheOnlyAnalyzerName,
+ Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues := runNestIf(pass, settings)
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
+ runNestIf(pass, settings)
return nil, nil
},
@@ -42,37 +26,34 @@ func New(settings *config.NestifSettings) *goanalysis.Linter {
"Reports deeply nested if statements",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysis.Issue {
+func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) {
checker := &nestif.Checker{
MinComplexity: settings.MinComplexity,
}
- var lintIssues []nestif.Issue
- for _, f := range pass.Files {
- lintIssues = append(lintIssues, checker.Check(f, pass.Fset)...)
- }
+ for _, file := range pass.Files {
+ position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
+ if !isGoFile {
+ continue
+ }
- if len(lintIssues) == 0 {
- return nil
- }
+ issues := checker.Check(file, pass.Fset)
+ if len(issues) == 0 {
+ continue
+ }
- sort.SliceStable(lintIssues, func(i, j int) bool {
- return lintIssues[i].Complexity > lintIssues[j].Complexity
- })
+ nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false)
- issues := make([]goanalysis.Issue, 0, len(lintIssues))
- for _, i := range lintIssues {
- issues = append(issues, goanalysis.NewIssue(&result.Issue{
- Pos: i.Pos,
- Text: i.Message,
- FromLinter: linterName,
- }, pass))
- }
+ f := pass.Fset.File(file.Pos())
- return issues
+ for _, issue := range issues {
+ pass.Report(analysis.Diagnostic{
+ Pos: f.LineStart(goanalysis.AdjustPos(issue.Pos.Line, nonAdjPosition.Line, position.Line)),
+ Message: issue.Message,
+ })
+ }
+ }
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go
new file mode 100644
index 0000000000..8349377b7b
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go
@@ -0,0 +1,23 @@
+package nilnesserr
+
+import (
+ "github.com/alingse/nilnesserr"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/goanalysis"
+ "github.com/golangci/golangci-lint/pkg/golinters/internal"
+)
+
+func New() *goanalysis.Linter {
+ a, err := nilnesserr.NewAnalyzer(nilnesserr.LinterSetting{})
+ if err != nil {
+ internal.LinterLogger.Fatalf("nilnesserr: create analyzer: %v", err)
+ }
+
+ return goanalysis.NewLinter(
+ a.Name,
+ a.Doc,
+ []*analysis.Analyzer{a},
+ nil,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go
index d8d677d999..ed25dec71f 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go
@@ -11,13 +11,13 @@ import (
func New(settings *config.NilNilSettings) *goanalysis.Linter {
a := analyzer.New()
- cfgMap := make(map[string]map[string]any)
+ cfg := make(map[string]map[string]any)
if settings != nil {
- cfgMap[a.Name] = map[string]any{
+ cfg[a.Name] = map[string]any{
"detect-opposite": settings.DetectOpposite,
}
if len(settings.CheckedTypes) != 0 {
- cfgMap[a.Name]["checked-types"] = settings.CheckedTypes
+ cfg[a.Name]["checked-types"] = settings.CheckedTypes
}
}
@@ -25,7 +25,7 @@ func New(settings *config.NilNilSettings) *goanalysis.Linter {
a.Name,
a.Doc,
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).
WithLoadMode(goanalysis.LoadModeTypesInfo)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go
new file mode 100644
index 0000000000..5e9ba4117c
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go
@@ -0,0 +1,41 @@
+package internal
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+func formatExtraLeadingSpace(fullDirective string) string {
+ return fmt.Sprintf("directive `%s` should not have more than one leading space", fullDirective)
+}
+
+func formatNotMachine(fullDirective string) string {
+ expected := fullDirective[:2] + strings.TrimLeftFunc(fullDirective[2:], unicode.IsSpace)
+ return fmt.Sprintf("directive `%s` should be written without leading space as `%s`",
+ fullDirective, expected)
+}
+
+func formatNotSpecific(fullDirective, directiveWithOptionalLeadingSpace string) string {
+ return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`",
+ fullDirective, directiveWithOptionalLeadingSpace)
+}
+
+func formatParseError(fullDirective, directiveWithOptionalLeadingSpace string) string {
+ return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`",
+ fullDirective,
+ directiveWithOptionalLeadingSpace)
+}
+
+func formatNoExplanation(fullDirective, fullDirectiveWithoutExplanation string) string {
+ return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`",
+ fullDirective, fullDirectiveWithoutExplanation)
+}
+
+func formatUnusedCandidate(fullDirective, expectedLinter string) string {
+ details := fmt.Sprintf("directive `%s` is unused", fullDirective)
+ if expectedLinter != "" {
+ details += fmt.Sprintf(" for linter %q", expectedLinter)
+ }
+ return details
+}
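
These helpers are unexported, so callers outside the package never see them directly; a self-contained copy of one of them, just to show the message shape it produces:

```go
package main

import "fmt"

// Copied from the new issues.go above, renamed exported for this demo.
func FormatUnusedCandidate(fullDirective, expectedLinter string) string {
	details := fmt.Sprintf("directive `%s` is unused", fullDirective)
	if expectedLinter != "" {
		details += fmt.Sprintf(" for linter %q", expectedLinter)
	}
	return details
}

func main() {
	fmt.Println(FormatUnusedCandidate("//nolint:lll", "lll"))
	// directive `//nolint:lll` is unused for linter "lll"
}
```
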
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go
index 08dd743783..21cd20124f 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go
@@ -2,123 +2,17 @@
package internal
import (
- "fmt"
- "go/ast"
"go/token"
"regexp"
"strings"
- "unicode"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/goanalysis"
"github.com/golangci/golangci-lint/pkg/result"
)
-type BaseIssue struct {
- fullDirective string
- directiveWithOptionalLeadingSpace string
- position token.Position
- replacement *result.Replacement
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (b BaseIssue) Position() token.Position {
- return b.position
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (b BaseIssue) Replacement() *result.Replacement {
- return b.replacement
-}
-
-type ExtraLeadingSpace struct {
- BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i ExtraLeadingSpace) Details() string {
- return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective)
-}
-
-func (i ExtraLeadingSpace) String() string { return toString(i) }
-
-type NotMachine struct {
- BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i NotMachine) Details() string {
- expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace)
- return fmt.Sprintf("directive `%s` should be written without leading space as `%s`",
- i.fullDirective, expected)
-}
-
-func (i NotMachine) String() string { return toString(i) }
-
-type NotSpecific struct {
- BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i NotSpecific) Details() string {
- return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`",
- i.fullDirective, i.directiveWithOptionalLeadingSpace)
-}
-
-func (i NotSpecific) String() string { return toString(i) }
-
-type ParseError struct {
- BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i ParseError) Details() string {
- return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`",
- i.fullDirective,
- i.directiveWithOptionalLeadingSpace)
-}
-
-func (i ParseError) String() string { return toString(i) }
-
-type NoExplanation struct {
- BaseIssue
- fullDirectiveWithoutExplanation string
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i NoExplanation) Details() string {
- return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`",
- i.fullDirective, i.fullDirectiveWithoutExplanation)
-}
-
-func (i NoExplanation) String() string { return toString(i) }
-
-type UnusedCandidate struct {
- BaseIssue
- ExpectedLinter string
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i UnusedCandidate) Details() string {
- details := fmt.Sprintf("directive `%s` is unused", i.fullDirective)
- if i.ExpectedLinter != "" {
- details += fmt.Sprintf(" for linter %q", i.ExpectedLinter)
- }
- return details
-}
-
-func (i UnusedCandidate) String() string { return toString(i) }
-
-func toString(issue Issue) string {
- return fmt.Sprintf("%s at %s", issue.Details(), issue.Position())
-}
-
-type Issue interface {
- Details() string
- Position() token.Position
- String() string
- Replacement() *result.Replacement
-}
-
-type Needs uint
+const LinterName = "nolintlint"
const (
NeedsMachineOnly Needs = 1 << iota
@@ -128,6 +22,10 @@ const (
NeedsAll = NeedsMachineOnly | NeedsSpecific | NeedsExplanation
)
+type Needs uint
+
+const commentMark = "//"
+
var commentPattern = regexp.MustCompile(`^//\s*(nolint)(:\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*)?\b`)
// matches a complete nolint directive
@@ -157,15 +55,10 @@ var (
)
//nolint:funlen,gocyclo // the function is going to be refactored in the future
-func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
- var issues []Issue
-
- for _, node := range nodes {
- file, ok := node.(*ast.File)
- if !ok {
- continue
- }
+func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) {
+ var issues []goanalysis.Issue
+ for _, file := range pass.Files {
for _, c := range file.Comments {
for _, comment := range c.List {
if !commentPattern.MatchString(comment.Text) {
@@ -180,47 +73,58 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
leadingSpace = leadingSpaceMatches[1]
}
- directiveWithOptionalLeadingSpace := "//"
+ directiveWithOptionalLeadingSpace := commentMark
if leadingSpace != "" {
directiveWithOptionalLeadingSpace += " "
}
- split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//")
+ split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], commentMark)
directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1])
- pos := fset.Position(comment.Pos())
- end := fset.Position(comment.End())
-
- base := BaseIssue{
- fullDirective: comment.Text,
- directiveWithOptionalLeadingSpace: directiveWithOptionalLeadingSpace,
- position: pos,
- }
+ pos := pass.Fset.Position(comment.Pos())
+ end := pass.Fset.Position(comment.End())
// check for, report and eliminate leading spaces, so we can check for other issues
if leadingSpace != "" {
- removeWhitespace := &result.Replacement{
- Inline: &result.InlineFix{
- StartCol: pos.Column + 1,
- Length: len(leadingSpace),
- NewString: "",
- },
- }
+ removeWhitespace := []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: token.Pos(pos.Offset),
+ End: token.Pos(pos.Offset + len(commentMark) + len(leadingSpace)),
+ NewText: []byte(commentMark),
+ }},
+ }}
+
if (l.needs & NeedsMachineOnly) != 0 {
- issue := NotMachine{BaseIssue: base}
- issue.BaseIssue.replacement = removeWhitespace
- issues = append(issues, issue)
+ issue := &result.Issue{
+ FromLinter: LinterName,
+ Text: formatNotMachine(comment.Text),
+ Pos: pos,
+ SuggestedFixes: removeWhitespace,
+ }
+
+ issues = append(issues, goanalysis.NewIssue(issue, pass))
} else if len(leadingSpace) > 1 {
- issue := ExtraLeadingSpace{BaseIssue: base}
- issue.BaseIssue.replacement = removeWhitespace
- issue.BaseIssue.replacement.Inline.NewString = " " // assume a single space was intended
- issues = append(issues, issue)
+ issue := &result.Issue{
+ FromLinter: LinterName,
+ Text: formatExtraLeadingSpace(comment.Text),
+ Pos: pos,
+ SuggestedFixes: removeWhitespace,
+ }
+
+ issues = append(issues, goanalysis.NewIssue(issue, pass))
}
}
fullMatches := fullDirectivePattern.FindStringSubmatch(comment.Text)
if len(fullMatches) == 0 {
- issues = append(issues, ParseError{BaseIssue: base})
+ issue := &result.Issue{
+ FromLinter: LinterName,
+ Text: formatParseError(comment.Text, directiveWithOptionalLeadingSpace),
+ Pos: pos,
+ }
+
+ issues = append(issues, goanalysis.NewIssue(issue, pass))
+
continue
}
@@ -230,7 +134,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
if lintersText != "" && !strings.HasPrefix(lintersText, "all") {
lls := strings.Split(lintersText, ",")
linters = make([]string, 0, len(lls))
- rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:")
+ rangeStart := (pos.Column - 1) + len(commentMark) + len(leadingSpace) + len("nolint:")
for i, ll := range lls {
rangeEnd := rangeStart + len(ll)
if i < len(lls)-1 {
@@ -246,46 +150,59 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
if (l.needs & NeedsSpecific) != 0 {
if len(linters) == 0 {
- issues = append(issues, NotSpecific{BaseIssue: base})
+ issue := &result.Issue{
+ FromLinter: LinterName,
+ Text: formatNotSpecific(comment.Text, directiveWithOptionalLeadingSpace),
+ Pos: pos,
+ }
+
+ issues = append(issues, goanalysis.NewIssue(issue, pass))
}
}
// when detecting unused directives, we send all the directives through and filter them out in the nolint processor
if (l.needs & NeedsUnused) != 0 {
- removeNolintCompletely := &result.Replacement{}
+ removeNolintCompletely := []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: token.Pos(pos.Offset),
+ End: token.Pos(end.Offset),
+ NewText: nil,
+ }},
+ }}
- startCol := pos.Column - 1
-
- if startCol == 0 {
- // if the directive starts from a new line, remove the line
- removeNolintCompletely.NeedOnlyDelete = true
- } else {
- removeNolintCompletely.Inline = &result.InlineFix{
- StartCol: startCol,
- Length: end.Column - pos.Column,
- NewString: "",
+ if len(linters) == 0 {
+ issue := &result.Issue{
+ FromLinter: LinterName,
+ Text: formatUnusedCandidate(comment.Text, ""),
+ Pos: pos,
+ ExpectNoLint: true,
+ SuggestedFixes: removeNolintCompletely,
}
- }
- if len(linters) == 0 {
- issue := UnusedCandidate{BaseIssue: base}
- issue.replacement = removeNolintCompletely
- issues = append(issues, issue)
+ issues = append(issues, goanalysis.NewIssue(issue, pass))
} else {
for _, linter := range linters {
- issue := UnusedCandidate{BaseIssue: base, ExpectedLinter: linter}
- // only offer replacement if there is a single linter
+ issue := &result.Issue{
+ FromLinter: LinterName,
+ Text: formatUnusedCandidate(comment.Text, linter),
+ Pos: pos,
+ ExpectNoLint: true,
+ ExpectedNoLintLinter: linter,
+ }
+
+ // only offer SuggestedFix if there is a single linter
// because of issues around commas and the possibility of all
// linters being removed
if len(linters) == 1 {
- issue.replacement = removeNolintCompletely
+ issue.SuggestedFixes = removeNolintCompletely
}
- issues = append(issues, issue)
+
+ issues = append(issues, goanalysis.NewIssue(issue, pass))
}
}
}
- if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") {
+ if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == commentMark) {
needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation
// otherwise, check if we are excluding all the mentioned linters
for _, ll := range linters {
@@ -297,10 +214,14 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
if needsExplanation {
fullDirectiveWithoutExplanation := trailingBlankExplanation.ReplaceAllString(comment.Text, "")
- issues = append(issues, NoExplanation{
- BaseIssue: base,
- fullDirectiveWithoutExplanation: fullDirectiveWithoutExplanation,
- })
+
+ issue := &result.Issue{
+ FromLinter: LinterName,
+ Text: formatNoExplanation(comment.Text, fullDirectiveWithoutExplanation),
+ Pos: pos,
+ }
+
+ issues = append(issues, goanalysis.NewIssue(issue, pass))
}
}
}
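
For a quick sanity check of the directive matching that drives all of the above, here is the `commentPattern` regexp copied verbatim and exercised against a few comment forms:

```go
package main

import (
	"fmt"
	"regexp"
)

var commentPattern = regexp.MustCompile(`^//\s*(nolint)(:\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*)?\b`)

func main() {
	for _, c := range []string{
		"//nolint:lll",
		"// nolint: lll, misspell",
		"//nolint",
		"// not a directive",
	} {
		fmt.Printf("%-30q -> %v\n", c, commentPattern.MatchString(c))
	}
	// Only the last one fails to match.
}
```
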
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go
index 9f04454a5a..e1c878628d 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go
@@ -2,31 +2,47 @@ package nolintlint
import (
"fmt"
- "go/ast"
"sync"
"golang.org/x/tools/go/analysis"
+ "github.com/golangci/golangci-lint/pkg/golinters/internal"
+
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal"
+ nolintlint "github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal"
"github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
-const LinterName = "nolintlint"
+const LinterName = nolintlint.LinterName
func New(settings *config.NoLintLintSettings) *goanalysis.Linter {
var mu sync.Mutex
var resIssues []goanalysis.Issue
+ var needs nolintlint.Needs
+ if settings.RequireExplanation {
+ needs |= nolintlint.NeedsExplanation
+ }
+ if settings.RequireSpecific {
+ needs |= nolintlint.NeedsSpecific
+ }
+ if !settings.AllowUnused {
+ needs |= nolintlint.NeedsUnused
+ }
+
+ lnt, err := nolintlint.NewLinter(needs, settings.AllowNoExplanation)
+ if err != nil {
+ internal.LinterLogger.Fatalf("%s: create analyzer: %v", nolintlint.LinterName, err)
+ }
+
analyzer := &analysis.Analyzer{
- Name: LinterName,
+ Name: nolintlint.LinterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runNoLintLint(pass, settings)
+ issues, err := lnt.Run(pass)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("linter failed to run: %w", err)
}
if len(issues) == 0 {
@@ -42,7 +58,7 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter {
}
return goanalysis.NewLinter(
- LinterName,
+ nolintlint.LinterName,
"Reports ill-formed or insufficient nolint directives",
[]*analysis.Analyzer{analyzer},
nil,
@@ -50,55 +66,3 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter {
return resIssues
}).WithLoadMode(goanalysis.LoadModeSyntax)
}
-
-func runNoLintLint(pass *analysis.Pass, settings *config.NoLintLintSettings) ([]goanalysis.Issue, error) {
- var needs internal.Needs
- if settings.RequireExplanation {
- needs |= internal.NeedsExplanation
- }
- if settings.RequireSpecific {
- needs |= internal.NeedsSpecific
- }
- if !settings.AllowUnused {
- needs |= internal.NeedsUnused
- }
-
- lnt, err := internal.NewLinter(needs, settings.AllowNoExplanation)
- if err != nil {
- return nil, err
- }
-
- nodes := make([]ast.Node, 0, len(pass.Files))
- for _, n := range pass.Files {
- nodes = append(nodes, n)
- }
-
- lintIssues, err := lnt.Run(pass.Fset, nodes...)
- if err != nil {
- return nil, fmt.Errorf("linter failed to run: %w", err)
- }
-
- var issues []goanalysis.Issue
-
- for _, i := range lintIssues {
- expectNoLint := false
- var expectedNolintLinter string
- if ii, ok := i.(internal.UnusedCandidate); ok {
- expectedNolintLinter = ii.ExpectedLinter
- expectNoLint = true
- }
-
- issue := &result.Issue{
- FromLinter: LinterName,
- Text: i.Details(),
- Pos: i.Position(),
- ExpectNoLint: expectNoLint,
- ExpectedNoLintLinter: expectedNolintLinter,
- Replacement: i.Replacement(),
- }
-
- issues = append(issues, goanalysis.NewIssue(issue, pass))
- }
-
- return issues, nil
-}
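
The `Needs` wiring moved from `runNoLintLint` into `New` is a plain bitmask; a self-contained sketch mirroring the constants and the settings-to-bits mapping above:

```go
package main

import "fmt"

type Needs uint

const (
	NeedsMachineOnly Needs = 1 << iota
	NeedsSpecific
	NeedsExplanation
	NeedsUnused
	NeedsAll = NeedsMachineOnly | NeedsSpecific | NeedsExplanation
)

func main() {
	// Stand-ins for settings.RequireExplanation / RequireSpecific / AllowUnused.
	requireExplanation, requireSpecific, allowUnused := true, false, false

	var needs Needs
	if requireExplanation {
		needs |= NeedsExplanation
	}
	if requireSpecific {
		needs |= NeedsSpecific
	}
	if !allowUnused {
		needs |= NeedsUnused
	}

	fmt.Println(needs&NeedsExplanation != 0) // true
	fmt.Println(needs&NeedsSpecific != 0)    // false
	fmt.Println(needs&NeedsUnused != 0)      // true
}
```
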
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go
index ce7ff9d59c..17e86c98ee 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go
@@ -2,7 +2,6 @@ package prealloc
import (
"fmt"
- "sync"
"github.com/alexkohler/prealloc/pkg"
"golang.org/x/tools/go/analysis"
@@ -10,29 +9,16 @@ import (
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
"github.com/golangci/golangci-lint/pkg/golinters/internal"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "prealloc"
func New(settings *config.PreallocSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues := runPreAlloc(pass, settings)
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
+ runPreAlloc(pass, settings)
return nil, nil
},
@@ -43,23 +29,16 @@ func New(settings *config.PreallocSettings) *goanalysis.Linter {
"Finds slice declarations that could potentially be pre-allocated",
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
-func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) []goanalysis.Issue {
- var issues []goanalysis.Issue
-
+func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) {
hints := pkg.Check(pass.Files, settings.Simple, settings.RangeLoops, settings.ForLoops)
for _, hint := range hints {
- issues = append(issues, goanalysis.NewIssue(&result.Issue{
- Pos: pass.Fset.Position(hint.Pos),
- Text: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)),
- FromLinter: linterName,
- }, pass))
+ pass.Report(analysis.Diagnostic{
+ Pos: hint.Pos,
+ Message: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)),
+ })
}
-
- return issues
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go
index 302ce67b88..6c65f86bcf 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go
@@ -1,21 +1,14 @@
package protogetter
import (
- "sync"
-
"github.com/ghostiam/protogetter"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
func New(settings *config.ProtoGetterSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
var cfg protogetter.Config
if settings != nil {
cfg = protogetter.Config{
@@ -25,50 +18,15 @@ func New(settings *config.ProtoGetterSettings) *goanalysis.Linter {
ReplaceFirstArgInAppend: settings.ReplaceFirstArgInAppend,
}
}
- cfg.Mode = protogetter.GolangciLintMode
-
- a := protogetter.NewAnalyzer(&cfg)
- a.Run = func(pass *analysis.Pass) (any, error) {
- pgIssues, err := protogetter.Run(pass, &cfg)
- if err != nil {
- return nil, err
- }
-
- issues := make([]goanalysis.Issue, len(pgIssues))
- for i, issue := range pgIssues {
- report := &result.Issue{
- FromLinter: a.Name,
- Pos: issue.Pos,
- Text: issue.Message,
- Replacement: &result.Replacement{
- Inline: &result.InlineFix{
- StartCol: issue.InlineFix.StartCol,
- Length: issue.InlineFix.Length,
- NewString: issue.InlineFix.NewString,
- },
- },
- }
- issues[i] = goanalysis.NewIssue(report, pass)
- }
-
- if len(issues) == 0 {
- return nil, nil
- }
+ cfg.Mode = protogetter.StandaloneMode
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
- return nil, nil
- }
+ a := protogetter.NewAnalyzer(&cfg)
return goanalysis.NewLinter(
a.Name,
a.Doc,
[]*analysis.Analyzer{a},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeTypesInfo)
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
index 8b030f15de..3af4885b40 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
@@ -4,11 +4,19 @@ import (
"github.com/raeperd/recvcheck"
"golang.org/x/tools/go/analysis"
+ "github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New() *goanalysis.Linter {
- a := recvcheck.Analyzer
+func New(settings *config.RecvcheckSettings) *goanalysis.Linter {
+ var cfg recvcheck.Settings
+
+ if settings != nil {
+ cfg.DisableBuiltin = settings.DisableBuiltin
+ cfg.Exclusions = settings.Exclusions
+ }
+
+ a := recvcheck.NewAnalyzer(cfg)
return goanalysis.NewLinter(
a.Name,
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
index 056a258e0a..ec621ccfba 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
@@ -2,6 +2,7 @@ package revive
import (
"bytes"
+ "cmp"
"encoding/json"
"fmt"
"go/token"
@@ -114,7 +115,7 @@ func newWrapper(settings *config.ReviveSettings) (*wrapper, error) {
}
func (w *wrapper) run(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) {
- packages := [][]string{internal.GetFileNames(pass)}
+ packages := [][]string{internal.GetGoFileNames(pass)}
failures, err := w.revive.Lint(packages, w.lintingRules, *w.conf)
if err != nil {
@@ -164,7 +165,7 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue {
lineRangeTo = object.Position.Start.Line
}
- return goanalysis.NewIssue(&result.Issue{
+ issue := &result.Issue{
Severity: string(object.Severity),
Text: fmt.Sprintf("%s: %s", object.RuleName, object.Failure.Failure),
Pos: token.Position{
@@ -178,7 +179,24 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue {
To: lineRangeTo,
},
FromLinter: linterName,
- }, pass)
+ }
+
+ if object.ReplacementLine != "" {
+ f := pass.Fset.File(token.Pos(object.Position.Start.Offset))
+
+ // Skip cgo files because the positions are wrong.
+ if object.GetFilename() == f.Name() {
+ issue.SuggestedFixes = []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: f.LineStart(object.Position.Start.Line),
+ End: goanalysis.EndOfLinePos(f, object.Position.End.Line),
+ NewText: []byte(object.ReplacementLine),
+ }},
+ }}
+ }
+ }
+
+ return goanalysis.NewIssue(issue, pass)
}
// This function mimics the GetConfig function of revive.
@@ -379,12 +397,8 @@ const defaultConfidence = 0.8
func normalizeConfig(cfg *lint.Config) {
// NOTE(ldez): this custom section for golangci-lint should be kept.
// ---
- if cfg.Confidence == 0 {
- cfg.Confidence = defaultConfidence
- }
- if cfg.Severity == "" {
- cfg.Severity = lint.SeverityWarning
- }
+ cfg.Confidence = cmp.Or(cfg.Confidence, defaultConfidence)
+ cfg.Severity = cmp.Or(cfg.Severity, lint.SeverityWarning)
// ---
if len(cfg.Rules) == 0 {
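
`cmp.Or` (Go 1.22+) returns the first of its arguments that is not the zero value, which is exactly the apply-default-if-unset pattern the two removed if-blocks expressed:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	confidence := 0.0 // zero value: unset
	severity := ""    // zero value: unset

	confidence = cmp.Or(confidence, 0.8)   // defaultConfidence
	severity = cmp.Or(severity, "warning") // stand-in for lint.SeverityWarning

	fmt.Println(confidence, severity) // 0.8 warning
}
```
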
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go
index f438c51b5c..7c8a0c8b02 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go
@@ -1,22 +1,15 @@
package tagalign
import (
- "sync"
-
"github.com/4meepo/tagalign"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
func New(settings *config.TagAlignSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
- options := []tagalign.Option{tagalign.WithMode(tagalign.GolangciLintMode)}
+ var options []tagalign.Option
if settings != nil {
options = append(options, tagalign.WithAlign(settings.Align))
@@ -32,44 +25,11 @@ func New(settings *config.TagAlignSettings) *goanalysis.Linter {
}
analyzer := tagalign.NewAnalyzer(options...)
- analyzer.Run = func(pass *analysis.Pass) (any, error) {
- taIssues := tagalign.Run(pass, options...)
-
- issues := make([]goanalysis.Issue, len(taIssues))
- for i, issue := range taIssues {
- report := &result.Issue{
- FromLinter: analyzer.Name,
- Pos: issue.Pos,
- Text: issue.Message,
- Replacement: &result.Replacement{
- Inline: &result.InlineFix{
- StartCol: issue.InlineFix.StartCol,
- Length: issue.InlineFix.Length,
- NewString: issue.InlineFix.NewString,
- },
- },
- }
-
- issues[i] = goanalysis.NewIssue(report, pass)
- }
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
- return nil, nil
- }
return goanalysis.NewLinter(
analyzer.Name,
analyzer.Doc,
[]*analysis.Analyzer{analyzer},
nil,
- ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go
index d1674c3e9e..08215c3a53 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go
@@ -10,10 +10,12 @@ import (
func New(settings *config.TagliatelleSettings) *goanalysis.Linter {
cfg := tagliatelle.Config{
- Rules: map[string]string{
- "json": "camel",
- "yaml": "camel",
- "header": "header",
+ Base: tagliatelle.Base{
+ Rules: map[string]string{
+ "json": "camel",
+ "yaml": "camel",
+ "header": "header",
+ },
},
}
@@ -21,7 +23,23 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter {
for k, v := range settings.Case.Rules {
cfg.Rules[k] = v
}
+
+ cfg.ExtendedRules = toExtendedRules(settings.Case.ExtendedRules)
cfg.UseFieldName = settings.Case.UseFieldName
+ cfg.IgnoredFields = settings.Case.IgnoredFields
+
+ for _, override := range settings.Case.Overrides {
+ cfg.Overrides = append(cfg.Overrides, tagliatelle.Overrides{
+ Base: tagliatelle.Base{
+ Rules: override.Rules,
+ ExtendedRules: toExtendedRules(override.ExtendedRules),
+ UseFieldName: override.UseFieldName,
+ IgnoredFields: override.IgnoredFields,
+ Ignore: override.Ignore,
+ },
+ Package: override.Package,
+ })
+ }
}
a := tagliatelle.New(cfg)
@@ -31,5 +49,19 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter {
a.Doc,
[]*analysis.Analyzer{a},
nil,
- ).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
+
+func toExtendedRules(src map[string]config.TagliatelleExtendedRule) map[string]tagliatelle.ExtendedRule {
+ result := make(map[string]tagliatelle.ExtendedRule, len(src))
+
+ for k, v := range src {
+ result[k] = tagliatelle.ExtendedRule{
+ Case: v.Case,
+ ExtraInitialisms: v.ExtraInitialisms,
+ InitialismOverrides: v.InitialismOverrides,
+ }
+ }
+
+ return result
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go
index 632152712b..f617da5536 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go
@@ -10,19 +10,19 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(cfg *config.TestpackageSettings) *goanalysis.Linter {
+func New(settings *config.TestpackageSettings) *goanalysis.Linter {
a := testpackage.NewAnalyzer()
- var settings map[string]map[string]any
- if cfg != nil {
- settings = map[string]map[string]any{
+ var cfg map[string]map[string]any
+ if settings != nil {
+ cfg = map[string]map[string]any{
a.Name: {
- testpackage.SkipRegexpFlagName: cfg.SkipRegexp,
- testpackage.AllowPackagesFlagName: strings.Join(cfg.AllowPackages, ","),
+ testpackage.SkipRegexpFlagName: settings.SkipRegexp,
+ testpackage.AllowPackagesFlagName: strings.Join(settings.AllowPackages, ","),
},
}
}
- return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings).
+ return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg).
WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go
index cc6ea755c9..102610a69a 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go
@@ -12,7 +12,7 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/internal"
)
-func New(cfg *config.ThelperSettings) *goanalysis.Linter {
+func New(settings *config.ThelperSettings) *goanalysis.Linter {
a := analyzer.NewAnalyzer()
opts := map[string]struct{}{
@@ -33,11 +33,11 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter {
"tb_first": {},
}
- if cfg != nil {
- applyTHelperOptions(cfg.Test, "t_", opts)
- applyTHelperOptions(cfg.Fuzz, "f_", opts)
- applyTHelperOptions(cfg.Benchmark, "b_", opts)
- applyTHelperOptions(cfg.TB, "tb_", opts)
+ if settings != nil {
+ applyTHelperOptions(settings.Test, "t_", opts)
+ applyTHelperOptions(settings.Fuzz, "f_", opts)
+ applyTHelperOptions(settings.Benchmark, "b_", opts)
+ applyTHelperOptions(settings.TB, "tb_", opts)
}
if len(opts) == 0 {
@@ -46,7 +46,7 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter {
args := maps.Keys(opts)
- cfgMap := map[string]map[string]any{
+ cfg := map[string]map[string]any{
a.Name: {
"checks": strings.Join(args, ","),
},
@@ -56,7 +56,7 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter {
a.Name,
a.Doc,
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go
index 0fe1847366..04c9a223e5 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go
@@ -1,8 +1,6 @@
package unparam
import (
- "sync"
-
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/buildssa"
"golang.org/x/tools/go/packages"
@@ -11,33 +9,21 @@ import (
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
"github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
const linterName = "unparam"
func New(settings *config.UnparamSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
analyzer := &analysis.Analyzer{
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Requires: []*analysis.Analyzer{buildssa.Analyzer},
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runUnparam(pass, settings)
+ err := runUnparam(pass, settings)
if err != nil {
return nil, err
}
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
return nil, nil
},
}
@@ -51,12 +37,10 @@ func New(settings *config.UnparamSettings) *goanalysis.Linter {
if settings.Algo != "cha" {
lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`")
}
- }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
}).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
-func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanalysis.Issue, error) {
+func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) error {
ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
ssaPkg := ssa.Pkg
@@ -74,17 +58,15 @@ func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanal
unparamIssues, err := c.Check()
if err != nil {
- return nil, err
+ return err
}
- var issues []goanalysis.Issue
for _, i := range unparamIssues {
- issues = append(issues, goanalysis.NewIssue(&result.Issue{
- Pos: pass.Fset.Position(i.Pos()),
- Text: i.Message(),
- FromLinter: linterName,
- }, pass))
+ pass.Report(analysis.Diagnostic{
+ Pos: i.Pos(),
+ Message: i.Message(),
+ })
}
- return issues, nil
+ return nil
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go
index 050e47f24c..00f7d9742a 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go
@@ -8,24 +8,24 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter {
+func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter {
a := analyzer.New()
- cfgMap := make(map[string]map[string]any)
- if cfg != nil {
- cfgMap[a.Name] = map[string]any{
- analyzer.ConstantKindFlag: cfg.ConstantKind,
- analyzer.CryptoHashFlag: cfg.CryptoHash,
- analyzer.HTTPMethodFlag: cfg.HTTPMethod,
- analyzer.HTTPStatusCodeFlag: cfg.HTTPStatusCode,
- analyzer.OSDevNullFlag: cfg.OSDevNull,
- analyzer.RPCDefaultPathFlag: cfg.DefaultRPCPath,
- analyzer.SQLIsolationLevelFlag: cfg.SQLIsolationLevel,
- analyzer.SyslogPriorityFlag: cfg.SyslogPriority,
- analyzer.TimeLayoutFlag: cfg.TimeLayout,
- analyzer.TimeMonthFlag: cfg.TimeMonth,
- analyzer.TimeWeekdayFlag: cfg.TimeWeekday,
- analyzer.TLSSignatureSchemeFlag: cfg.TLSSignatureScheme,
+ cfg := make(map[string]map[string]any)
+ if settings != nil {
+ cfg[a.Name] = map[string]any{
+ analyzer.ConstantKindFlag: settings.ConstantKind,
+ analyzer.CryptoHashFlag: settings.CryptoHash,
+ analyzer.HTTPMethodFlag: settings.HTTPMethod,
+ analyzer.HTTPStatusCodeFlag: settings.HTTPStatusCode,
+ analyzer.OSDevNullFlag: settings.OSDevNull != nil && *settings.OSDevNull,
+ analyzer.RPCDefaultPathFlag: settings.DefaultRPCPath,
+ analyzer.SQLIsolationLevelFlag: settings.SQLIsolationLevel,
+ analyzer.SyslogPriorityFlag: settings.SyslogPriority != nil && *settings.SyslogPriority,
+ analyzer.TimeLayoutFlag: settings.TimeLayout,
+ analyzer.TimeMonthFlag: settings.TimeMonth,
+ analyzer.TimeWeekdayFlag: settings.TimeWeekday,
+ analyzer.TLSSignatureSchemeFlag: settings.TLSSignatureScheme,
}
}
@@ -33,6 +33,6 @@ func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter {
a.Name,
a.Doc,
[]*analysis.Analyzer{a},
- cfgMap,
+ cfg,
).WithLoadMode(goanalysis.LoadModeSyntax)
}
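
Note: two of the usestdlibvars settings (`OSDevNull`, `SyslogPriority`) evidently moved to `*bool`, so the hunk dereferences them nil-safely. A sketch naming that pattern — `derefBool` is a hypothetical helper, the vendored code simply inlines the expression twice:

```go
package main

import "fmt"

// derefBool captures the nil-safe dereference used above: with *bool the
// config layer can distinguish "unset" from "false", and an unset value
// must read as false.
func derefBool(p *bool) bool {
	return p != nil && *p
}

func main() {
	var unset *bool
	set := true
	fmt.Println(derefBool(unset), derefBool(&set)) // false true
}
```
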
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go
new file mode 100644
index 0000000000..a21742fbd6
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go
@@ -0,0 +1,33 @@
+package usetesting
+
+import (
+ "github.com/ldez/usetesting"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+ "github.com/golangci/golangci-lint/pkg/goanalysis"
+)
+
+func New(settings *config.UseTestingSettings) *goanalysis.Linter {
+ a := usetesting.NewAnalyzer()
+
+ cfg := make(map[string]map[string]any)
+ if settings != nil {
+ cfg[a.Name] = map[string]any{
+ "contextbackground": settings.ContextBackground,
+ "contexttodo": settings.ContextTodo,
+ "oschdir": settings.OSChdir,
+ "osmkdirtemp": settings.OSMkdirTemp,
+ "ossetenv": settings.OSSetenv,
+ "ostempdir": settings.OSTempDir,
+ "oscreatetemp": settings.OSCreateTemp,
+ }
+ }
+
+ return goanalysis.NewLinter(
+ a.Name,
+ a.Doc,
+ []*analysis.Analyzer{a},
+ cfg,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
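
Note: the `cfg` map handed to `goanalysis.NewLinter` is keyed by analyzer name, then by flag name. The mechanism behind it is the standard `flag.FlagSet` every `analysis.Analyzer` exposes as its `Flags` field; a self-contained sketch of that wiring, assuming golangci-lint performs the equivalent internally (`applyFlags` is hypothetical):

```go
package main

import (
	"flag"
	"fmt"
)

// applyFlags sets each map entry as a flag on the analyzer's FlagSet,
// stringifying the value the way flag.Set expects.
func applyFlags(fs *flag.FlagSet, settings map[string]any) error {
	for name, value := range settings {
		if err := fs.Set(name, fmt.Sprintf("%v", value)); err != nil {
			return fmt.Errorf("setting flag %q: %w", name, err)
		}
	}
	return nil
}

func main() {
	fs := flag.NewFlagSet("usetesting", flag.ContinueOnError)
	osChdir := fs.Bool("oschdir", false, "report os.Chdir usage")

	if err := applyFlags(fs, map[string]any{"oschdir": true}); err != nil {
		panic(err)
	}
	fmt.Println(*osChdir) // true
}
```
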
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go
index 721bfada1c..d45969efce 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go
@@ -1,28 +1,17 @@
package whitespace
import (
- "fmt"
- "sync"
-
"github.com/ultraware/whitespace"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
- "github.com/golangci/golangci-lint/pkg/lint/linter"
- "github.com/golangci/golangci-lint/pkg/result"
)
-const linterName = "whitespace"
-
func New(settings *config.WhitespaceSettings) *goanalysis.Linter {
- var mu sync.Mutex
- var resIssues []goanalysis.Issue
-
var wsSettings whitespace.Settings
if settings != nil {
wsSettings = whitespace.Settings{
- Mode: whitespace.RunningModeGolangCI,
MultiIf: settings.MultiIf,
MultiFunc: settings.MultiFunc,
}
@@ -35,68 +24,5 @@ func New(settings *config.WhitespaceSettings) *goanalysis.Linter {
a.Doc,
[]*analysis.Analyzer{a},
nil,
- ).WithContextSetter(func(_ *linter.Context) {
- a.Run = func(pass *analysis.Pass) (any, error) {
- issues, err := runWhitespace(pass, wsSettings)
- if err != nil {
- return nil, err
- }
-
- if len(issues) == 0 {
- return nil, nil
- }
-
- mu.Lock()
- resIssues = append(resIssues, issues...)
- mu.Unlock()
-
- return nil, nil
- }
- }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
- return resIssues
- }).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
-func runWhitespace(pass *analysis.Pass, wsSettings whitespace.Settings) ([]goanalysis.Issue, error) {
- lintIssues := whitespace.Run(pass, &wsSettings)
-
- issues := make([]goanalysis.Issue, len(lintIssues))
- for i, issue := range lintIssues {
- report := &result.Issue{
- FromLinter: linterName,
- Pos: pass.Fset.PositionFor(issue.Diagnostic, false),
- Text: issue.Message,
- }
-
- switch issue.MessageType {
- case whitespace.MessageTypeRemove:
- if len(issue.LineNumbers) == 0 {
- continue
- }
-
- report.LineRange = &result.Range{
- From: issue.LineNumbers[0],
- To: issue.LineNumbers[len(issue.LineNumbers)-1],
- }
-
- report.Replacement = &result.Replacement{NeedOnlyDelete: true}
-
- case whitespace.MessageTypeAdd:
- report.Pos = pass.Fset.PositionFor(issue.FixStart, false)
- report.Replacement = &result.Replacement{
- Inline: &result.InlineFix{
- StartCol: 0,
- Length: 1,
- NewString: "\n\t",
- },
- }
-
- default:
- return nil, fmt.Errorf("unknown message type: %v", issue.MessageType)
- }
-
- issues[i] = goanalysis.NewIssue(report, pass)
- }
-
- return issues, nil
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go
index 96ec2eeae0..b2f5ec7420 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go
@@ -11,6 +11,8 @@ import (
func New(settings *config.WrapcheckSettings) *goanalysis.Linter {
cfg := wrapcheck.NewDefaultConfig()
if settings != nil {
+ cfg.ExtraIgnoreSigs = settings.ExtraIgnoreSigs
+
if len(settings.IgnoreSigs) != 0 {
cfg.IgnoreSigs = settings.IgnoreSigs
}
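
Note: the wrapcheck hunk keeps the existing replace-only-when-set behavior for `IgnoreSigs` and adds `ExtraIgnoreSigs`, which is copied unconditionally. A sketch of the assumed semantics (the actual merging happens inside wrapcheck; the default signatures listed are an illustrative subset):

```go
package main

import "fmt"

func main() {
	// Assumed semantics: ignore-sigs replaces the defaults only when
	// non-empty, while extra-ignore-sigs always extends the base list.
	defaults := []string{".Errorf(", "errors.New(", "errors.Unwrap("}
	ignoreSigs := []string{"mypkg.Wrap("} // replaces defaults when non-empty
	extra := []string{"status.Error("}    // always appended

	base := defaults
	if len(ignoreSigs) != 0 {
		base = ignoreSigs
	}
	effective := append(append([]string{}, base...), extra...)
	fmt.Println(effective) // [mypkg.Wrap( status.Error(]
}
```
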
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
index 6d6d4b17e7..1e438a0f03 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
@@ -164,12 +164,20 @@ func (lc *Config) WithNoopFallback(cfg *config.Config, cond func(cfg *config.Con
}
func IsGoLowerThanGo122() func(cfg *config.Config) error {
+ return isGoLowerThanGo("1.22")
+}
+
+func IsGoLowerThanGo124() func(cfg *config.Config) error {
+ return isGoLowerThanGo("1.24")
+}
+
+func isGoLowerThanGo(v string) func(cfg *config.Config) error {
return func(cfg *config.Config) error {
- if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, "1.22") {
+ if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, v) {
return nil
}
- return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go 1.22", cfg.Run.Go)
+ return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go %s", cfg.Run.Go, v)
}
}
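
Note: the version gate is factored so new thresholds are one-liners; `IsGoLowerThanGo124` pairs with `WithNoopFallback` the same way the existing 1.22 gate does for intrange later in this diff. A self-contained sketch of the comparison itself — the vendored code delegates to `config.IsGoGreaterThanOrEqual`, and the stdlib `go/version` package (Go 1.22+) is used here only to keep the example runnable:

```go
package main

import (
	"fmt"
	"go/version"
)

// isGoLowerThan mirrors the parameterized gate introduced above:
// a project Go version below the threshold disables the linter.
func isGoLowerThan(projectGo, threshold string) bool {
	return version.Compare("go"+projectGo, "go"+threshold) < 0
}

func main() {
	fmt.Println(isGoLowerThan("1.22", "1.24")) // true  -> linter becomes a no-op
	fmt.Println(isGoLowerThan("1.24", "1.24")) // false -> linter runs
}
```
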
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go
index d2a2dc3d07..4338aa88cc 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go
@@ -26,6 +26,7 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/exhaustive"
"github.com/golangci/golangci-lint/pkg/golinters/exhaustruct"
"github.com/golangci/golangci-lint/pkg/golinters/exportloopref"
+ "github.com/golangci/golangci-lint/pkg/golinters/exptostd"
"github.com/golangci/golangci-lint/pkg/golinters/fatcontext"
"github.com/golangci/golangci-lint/pkg/golinters/forbidigo"
"github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert"
@@ -72,6 +73,7 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/nakedret"
"github.com/golangci/golangci-lint/pkg/golinters/nestif"
"github.com/golangci/golangci-lint/pkg/golinters/nilerr"
+ "github.com/golangci/golangci-lint/pkg/golinters/nilnesserr"
"github.com/golangci/golangci-lint/pkg/golinters/nilnil"
"github.com/golangci/golangci-lint/pkg/golinters/nlreturn"
"github.com/golangci/golangci-lint/pkg/golinters/noctx"
@@ -105,6 +107,7 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/unparam"
"github.com/golangci/golangci-lint/pkg/golinters/unused"
"github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars"
+ "github.com/golangci/golangci-lint/pkg/golinters/usetesting"
"github.com/golangci/golangci-lint/pkg/golinters/varnamelen"
"github.com/golangci/golangci-lint/pkg/golinters/wastedassign"
"github.com/golangci/golangci-lint/pkg/golinters/whitespace"
@@ -160,7 +163,8 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.58.0").
WithPresets(linter.PresetStyle).
WithLoadForGoAnalysis().
- WithURL("https://github.com/lasiar/canonicalHeader"),
+ WithAutoFix().
+ WithURL("https://github.com/lasiar/canonicalheader"),
linter.NewConfig(containedctx.New()).
WithSince("v1.44.0").
@@ -182,18 +186,16 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(cyclop.New(&cfg.LintersSettings.Cyclop)).
WithSince("v1.37.0").
- WithLoadForGoAnalysis().
WithPresets(linter.PresetComplexity).
WithURL("https://github.com/bkielbasa/cyclop"),
linter.NewConfig(decorder.New(&cfg.LintersSettings.Decorder)).
WithSince("v1.44.0").
- WithPresets(linter.PresetFormatting, linter.PresetStyle).
+ WithPresets(linter.PresetStyle).
WithURL("https://gitlab.com/bosi/decorder"),
linter.NewConfig(linter.NewNoopDeprecated("deadcode", cfg, linter.DeprecationError)).
WithSince("v1.0.0").
- WithLoadForGoAnalysis().
WithPresets(linter.PresetUnused).
WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode").
DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"),
@@ -216,6 +218,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(dupword.New(&cfg.LintersSettings.DupWord)).
WithSince("v1.50.0").
WithPresets(linter.PresetComment).
+ WithAutoFix().
WithURL("https://github.com/Abirdcfly/dupword"),
linter.NewConfig(durationcheck.New()).
@@ -247,12 +250,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.32.0").
WithPresets(linter.PresetBugs, linter.PresetError).
WithLoadForGoAnalysis().
+ WithAutoFix().
WithURL("https://github.com/polyfloyd/go-errorlint"),
linter.NewConfig(linter.NewNoopDeprecated("execinquery", cfg, linter.DeprecationError)).
WithSince("v1.46.0").
WithPresets(linter.PresetSQL).
- WithLoadForGoAnalysis().
WithURL("https://github.com/1uf3/execinquery").
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.58.0", ""),
@@ -265,7 +268,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(linter.NewNoopDeprecated("exhaustivestruct", cfg, linter.DeprecationError)).
WithSince("v1.32.0").
WithPresets(linter.PresetStyle, linter.PresetTest).
- WithLoadForGoAnalysis().
WithURL("https://github.com/mbilski/exhaustivestruct").
DeprecatedError("The repository of the linter has been deprecated by the owner.", "v1.46.0", "exhaustruct"),
@@ -282,6 +284,13 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/kyoh86/exportloopref").
DeprecatedWarning("Since Go1.22 (loopvar) this linter is no longer relevant.", "v1.60.2", "copyloopvar"),
+ linter.NewConfig(exptostd.New()).
+ WithSince("v1.63.0").
+ WithPresets(linter.PresetStyle).
+ WithLoadForGoAnalysis().
+ WithAutoFix().
+ WithURL("https://github.com/ldez/exptostd"),
+
linter.NewConfig(forbidigo.New(&cfg.LintersSettings.Forbidigo)).
WithSince("v1.34.0").
WithPresets(linter.PresetStyle).
@@ -301,6 +310,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.58.0").
WithPresets(linter.PresetPerformance).
WithLoadForGoAnalysis().
+ WithAutoFix().
WithURL("https://github.com/Crocmagnon/fatcontext"),
linter.NewConfig(funlen.New(&cfg.LintersSettings.Funlen)).
@@ -318,6 +328,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.51.0").
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/nunnatsa/ginkgolinter"),
linter.NewConfig(gocheckcompilerdirectives.New()).
@@ -379,6 +390,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithPresets(linter.PresetStyle, linter.PresetError).
WithLoadForGoAnalysis().
WithAlternativeNames("goerr113").
+ WithAutoFix().
WithURL("https://github.com/Djarvur/go-err113"),
linter.NewConfig(gofmt.New(&cfg.LintersSettings.Gofmt)).
@@ -407,7 +419,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(linter.NewNoopDeprecated("golint", cfg, linter.DeprecationError)).
WithSince("v1.0.0").
- WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
WithURL("https://github.com/golang/lint").
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"),
@@ -451,6 +462,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
WithAlternativeNames(megacheckName).
+ WithAutoFix().
WithURL("https://github.com/dominikh/go-tools/tree/master/simple"),
linter.NewConfig(gosmopolitan.New(&cfg.LintersSettings.Gosmopolitan)).
@@ -464,6 +476,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.0.0").
WithLoadForGoAnalysis().
WithPresets(linter.PresetBugs, linter.PresetMetaLinter).
+ WithAutoFix().
WithAlternativeNames("vet", "vetshadow").
WithURL("https://pkg.go.dev/cmd/vet"),
@@ -482,12 +495,14 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.62.0").
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/uudashr/iface"),
linter.NewConfig(importas.New(&cfg.LintersSettings.ImportAs)).
WithSince("v1.38.0").
WithPresets(linter.PresetStyle).
WithLoadForGoAnalysis().
+ WithAutoFix().
WithURL("https://github.com/julz/importas"),
linter.NewConfig(inamedparam.New(&cfg.LintersSettings.Inamedparam)).
@@ -508,7 +523,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(linter.NewNoopDeprecated("interfacer", cfg, linter.DeprecationError)).
WithSince("v1.0.0").
- WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
WithURL("https://github.com/mvdan/interfacer").
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", ""),
@@ -517,6 +531,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.57.0").
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/ckaznocha/intrange").
WithNoopFallback(cfg, linter.IsGoLowerThanGo122()),
@@ -550,7 +565,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(linter.NewNoopDeprecated("maligned", cfg, linter.DeprecationError)).
WithSince("v1.0.0").
- WithLoadForGoAnalysis().
WithPresets(linter.PresetPerformance).
WithURL("https://github.com/mdempsky/maligned").
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"),
@@ -577,6 +591,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(nakedret.New(&cfg.LintersSettings.Nakedret)).
WithSince("v1.19.0").
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/alexkohler/nakedret"),
linter.NewConfig(nestif.New(&cfg.LintersSettings.Nestif)).
@@ -590,6 +605,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithPresets(linter.PresetBugs).
WithURL("https://github.com/gostaticanalysis/nilerr"),
+ linter.NewConfig(nilnesserr.New()).
+ WithSince("v1.63.0").
+ WithLoadForGoAnalysis().
+ WithPresets(linter.PresetBugs).
+ WithURL("https://github.com/alingse/nilnesserr"),
+
linter.NewConfig(nilnil.New(&cfg.LintersSettings.NilNil)).
WithSince("v1.43.0").
WithPresets(linter.PresetStyle).
@@ -599,6 +620,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(nlreturn.New(&cfg.LintersSettings.Nlreturn)).
WithSince("v1.30.0").
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/ssgreg/nlreturn"),
linter.NewConfig(noctx.New()).
@@ -634,6 +656,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.55.0").
WithLoadForGoAnalysis().
WithPresets(linter.PresetPerformance).
+ WithAutoFix().
WithURL("https://github.com/catenacyber/perfsprint"),
linter.NewConfig(prealloc.New(&cfg.LintersSettings.Prealloc)).
@@ -664,7 +687,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithLoadForGoAnalysis().
WithURL("https://github.com/curioswitch/go-reassign"),
- linter.NewConfig(recvcheck.New()).
+ linter.NewConfig(recvcheck.New(&cfg.LintersSettings.Recvcheck)).
WithSince("v1.62.0").
WithPresets(linter.PresetBugs).
WithLoadForGoAnalysis().
@@ -674,6 +697,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.37.0").
WithPresets(linter.PresetStyle, linter.PresetMetaLinter).
ConsiderSlow().
+ WithAutoFix().
WithURL("https://github.com/mgechev/revive"),
linter.NewConfig(rowserrcheck.New(&cfg.LintersSettings.RowsErrCheck)).
@@ -685,7 +709,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(sloglint.New(&cfg.LintersSettings.SlogLint)).
WithSince("v1.55.0").
WithLoadForGoAnalysis().
- WithPresets(linter.PresetStyle, linter.PresetFormatting).
+ WithPresets(linter.PresetStyle).
WithURL("https://github.com/go-simpler/sloglint"),
linter.NewConfig(linter.NewNoopDeprecated("scopelint", cfg, linter.DeprecationError)).
@@ -712,11 +736,11 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithLoadForGoAnalysis().
WithPresets(linter.PresetBugs, linter.PresetMetaLinter).
WithAlternativeNames(megacheckName).
+ WithAutoFix().
WithURL("https://staticcheck.dev/"),
linter.NewConfig(linter.NewNoopDeprecated("structcheck", cfg, linter.DeprecationError)).
WithSince("v1.0.0").
- WithLoadForGoAnalysis().
WithPresets(linter.PresetUnused).
WithURL("https://github.com/opennota/check").
DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"),
@@ -725,17 +749,19 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.20.0").
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"),
linter.NewConfig(tagalign.New(&cfg.LintersSettings.TagAlign)).
WithSince("v1.53.0").
- WithPresets(linter.PresetStyle, linter.PresetFormatting).
+ WithPresets(linter.PresetStyle).
WithAutoFix().
WithURL("https://github.com/4meepo/tagalign"),
linter.NewConfig(tagliatelle.New(&cfg.LintersSettings.Tagliatelle)).
WithSince("v1.40.0").
WithPresets(linter.PresetStyle).
+ WithLoadForGoAnalysis().
WithURL("https://github.com/ldez/tagliatelle"),
linter.NewConfig(tenv.New(&cfg.LintersSettings.Tenv)).
@@ -753,6 +779,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.55.0").
WithPresets(linter.PresetTest, linter.PresetBugs).
WithLoadForGoAnalysis().
+ WithAutoFix().
WithURL("https://github.com/Antonboom/testifylint"),
linter.NewConfig(testpackage.New(&cfg.LintersSettings.Testpackage)).
@@ -802,11 +829,18 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(usestdlibvars.New(&cfg.LintersSettings.UseStdlibVars)).
WithSince("v1.48.0").
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/sashamelentyev/usestdlibvars"),
+ linter.NewConfig(usetesting.New(&cfg.LintersSettings.UseTesting)).
+ WithSince("v1.63.0").
+ WithPresets(linter.PresetTest).
+ WithLoadForGoAnalysis().
+ WithAutoFix().
+ WithURL("https://github.com/ldez/usetesting"),
+
linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg, linter.DeprecationError)).
WithSince("v1.0.0").
- WithLoadForGoAnalysis().
WithPresets(linter.PresetUnused).
WithURL("https://github.com/opennota/check").
DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"),
@@ -838,6 +872,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(wsl.New(&cfg.LintersSettings.WSL)).
WithSince("v1.20.0").
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/bombsimon/wsl"),
linter.NewConfig(zerologlint.New()).
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
index 2c47c7166e..157fde715f 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
@@ -10,6 +10,7 @@ import (
"github.com/golangci/golangci-lint/internal/errorutil"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/fsutils"
+ "github.com/golangci/golangci-lint/pkg/goformatters"
"github.com/golangci/golangci-lint/pkg/goutil"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/lint/lintersdb"
@@ -60,6 +61,11 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti
return nil, fmt.Errorf("failed to get enabled linters: %w", err)
}
+ metaFormatter, err := goformatters.NewMetaFormatter(log, cfg, enabledLinters)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create meta-formatter: %w", err)
+ }
+
return &Runner{
Processors: []processors.Processor{
processors.NewCgo(goenv),
@@ -94,7 +100,7 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti
processors.NewSeverity(log.Child(logutils.DebugKeySeverityRules), files, &cfg.Severity),
// The fixer still needs to see paths for the issues that are relative to the current directory.
- processors.NewFixer(cfg, log, fileCache),
+ processors.NewFixer(cfg, log, fileCache, metaFormatter),
// Now we can modify the issues for output.
processors.NewPathPrefixer(cfg.Output.PathPrefix),
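
Note: the runner now builds a `goformatters.MetaFormatter` from the enabled linters and threads it into the fixer, which (per the fixer.go hunk below) runs every patched buffer through `formatter.Format(path, out)` before writing it back, so applied fixes cannot leave a file unformatted. A conceptual sketch of that post-edit step, with stdlib `go/format` standing in for the meta-formatter:

```go
package main

import (
	"fmt"
	"go/format"
)

func main() {
	// A buffer as it might look right after text edits were applied.
	patched := []byte("package main\nfunc  main( ){fmt.Println(\"hi\")}\n")

	out, err := format.Source(patched)
	if err != nil {
		// A real fixer would keep the patched-but-unformatted content
		// rather than dropping the fix.
		out = patched
	}
	fmt.Printf("%s", out)
}
```
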
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
index b65339682c..7c5932368f 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
@@ -12,7 +12,7 @@ const defaultCodeClimateSeverity = "critical"
// CodeClimateIssue is a subset of the Code Climate spec.
// https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types
// It is just enough to support GitLab CI Code Quality.
-// https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool
+// https://docs.gitlab.com/ee/ci/testing/code_quality.html#code-quality-report-format
type CodeClimateIssue struct {
Description string `json:"description"`
CheckName string `json:"check_name"`
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go
index 1d1c9f7d32..83c4959114 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go
@@ -4,7 +4,6 @@ import (
"fmt"
"io"
"strings"
- "unicode/utf8"
"github.com/golangci/golangci-lint/pkg/result"
)
@@ -112,11 +111,9 @@ func (i InspectionInstance) Print(w io.Writer, replacer *strings.Replacer) (int,
}
func cutVal(s string, limit int) string {
- var size, count int
- for i := 0; i < limit && count < len(s); i++ {
- _, size = utf8.DecodeRuneInString(s[count:])
- count += size
+ runes := []rune(s)
+ if len(runes) > limit {
+ return string(runes[:limit])
}
-
- return s[:count]
+ return s
}
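
Note: the rewritten `cutVal` replaces the manual `utf8.DecodeRuneInString` walk with a `[]rune` conversion; both truncate by characters, but the new form cannot split a multi-byte UTF-8 sequence the way a naive byte slice would. Runnable copy for illustration:

```go
package main

import "fmt"

// cutVal as rewritten in the hunk: the limit counts runes, not bytes.
func cutVal(s string, limit int) string {
	runes := []rune(s)
	if len(runes) > limit {
		return string(runes[:limit])
	}
	return s
}

func main() {
	fmt.Println(cutVal("héllo", 2)) // "hé" — a byte slice s[:2] would cut é in half
}
```
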
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
index 32246a6df4..e338963fa3 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
@@ -5,6 +5,7 @@ import (
"fmt"
"go/token"
+ "golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
)
@@ -12,18 +13,6 @@ type Range struct {
From, To int
}
-type Replacement struct {
- NeedOnlyDelete bool // need to delete all lines of the issue without replacement with new lines
- NewLines []string // if NeedDelete is false it's the replacement lines
- Inline *InlineFix
-}
-
-type InlineFix struct {
- StartCol int // zero-based
- Length int // length of chunk to be replaced
- NewString string
-}
-
type Issue struct {
FromLinter string
Text string
@@ -33,19 +22,19 @@ type Issue struct {
// Source lines of a code with the issue to show
SourceLines []string
- // If we know how to fix the issue we can provide replacement lines
- Replacement *Replacement
-
// Pkg is needed for proper caching of linting results
Pkg *packages.Package `json:"-"`
- LineRange *Range `json:",omitempty"`
-
Pos token.Position
+ LineRange *Range `json:",omitempty"`
+
// HunkPos is used only when golangci-lint is run over a diff
HunkPos int `json:",omitempty"`
+ // If we know how to fix the issue we can provide replacement lines
+ SuggestedFixes []analysis.SuggestedFix `json:",omitempty"`
+
// If we are expecting a nolint (because this is from nolintlint), record the expected linter
ExpectNoLint bool
ExpectedNoLintLinter string
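
Note: this is the pivotal type change of the whole update: the bespoke `Replacement`/`InlineFix` pair is deleted and `Issue` now carries the standard `analysis.SuggestedFix` values that go/analysis-based linters already produce. Constructing one looks like this (positions and replacement text are fabricated for illustration):

```go
package main

import (
	"fmt"
	"go/token"

	"golang.org/x/tools/go/analysis"
)

func main() {
	// A fix in the new representation: token.Pos byte offsets plus the
	// replacement source text for the half-open range [Pos, End).
	fix := analysis.SuggestedFix{
		Message: "replace deprecated call",
		TextEdits: []analysis.TextEdit{{
			Pos:     token.Pos(120),             // start of the old expression
			End:     token.Pos(135),             // end of the old expression
			NewText: []byte("newpkg.Call(ctx)"), // replacement text
		}},
	}
	fmt.Printf("%d edit(s): %s\n", len(fix.TextEdits), fix.Message)
}
```
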
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
index 764af5a923..b82c7f2071 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
@@ -1,16 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// This file is inspired by go/analysis/internal/checker/checker.go
+
package processors
import (
- "bytes"
+ "errors"
"fmt"
"os"
- "path/filepath"
- "sort"
- "strings"
+ "slices"
+
+ "golang.org/x/exp/maps"
- "github.com/golangci/golangci-lint/internal/go/robustio"
+ "github.com/golangci/golangci-lint/internal/x/tools/diff"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/fsutils"
+ "github.com/golangci/golangci-lint/pkg/goformatters"
+ "github.com/golangci/golangci-lint/pkg/goformatters/gci"
+ "github.com/golangci/golangci-lint/pkg/goformatters/gofmt"
+ "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt"
+ "github.com/golangci/golangci-lint/pkg/goformatters/goimports"
"github.com/golangci/golangci-lint/pkg/logutils"
"github.com/golangci/golangci-lint/pkg/result"
"github.com/golangci/golangci-lint/pkg/timeutils"
@@ -18,19 +29,23 @@ import (
var _ Processor = (*Fixer)(nil)
+const filePerm = 0644
+
type Fixer struct {
cfg *config.Config
log logutils.Log
fileCache *fsutils.FileCache
sw *timeutils.Stopwatch
+ formatter *goformatters.MetaFormatter
}
-func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache) *Fixer {
+func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache, formatter *goformatters.MetaFormatter) *Fixer {
return &Fixer{
cfg: cfg,
log: log,
fileCache: fileCache,
sw: timeutils.NewStopwatch("fixer", log),
+ formatter: formatter,
}
}
@@ -43,218 +58,246 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) {
return issues, nil
}
- outIssues := make([]result.Issue, 0, len(issues))
- issuesToFixPerFile := map[string][]result.Issue{}
- for i := range issues {
- issue := &issues[i]
- if issue.Replacement == nil {
- outIssues = append(outIssues, *issue)
- continue
- }
+ p.log.Infof("Applying suggested fixes")
- issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue)
- }
-
- for file, issuesToFix := range issuesToFixPerFile {
- err := p.sw.TrackStageErr("all", func() error {
- return p.fixIssuesInFile(file, issuesToFix)
- })
- if err != nil {
- p.log.Errorf("Failed to fix issues in file %s: %s", file, err)
-
- // show issues only if can't fix them
- outIssues = append(outIssues, issuesToFix...)
- }
+ notFixableIssues, err := timeutils.TrackStage(p.sw, "all", func() ([]result.Issue, error) {
+ return p.process(issues)
+ })
+ if err != nil {
+ p.log.Warnf("Failed to fix issues: %v", err)
}
p.printStat()
- return outIssues, nil
+ return notFixableIssues, nil
}
-func (Fixer) Finish() {}
+//nolint:funlen,gocyclo // This function should not be split.
+func (p Fixer) process(issues []result.Issue) ([]result.Issue, error) {
+ // filenames / linters / edits
+ editsByLinter := make(map[string]map[string][]diff.Edit)
-func (p Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error {
- // TODO: don't read the whole file into memory: read line by line;
- // can't just use bufio.scanner: it has a line length limit
- origFileData, err := p.fileCache.GetFileBytes(filePath)
- if err != nil {
- return fmt.Errorf("failed to get file bytes for %s: %w", filePath, err)
- }
+ formatters := []string{gofumpt.Name, goimports.Name, gofmt.Name, gci.Name}
- origFileLines := bytes.Split(origFileData, []byte("\n"))
+ var notFixableIssues []result.Issue
- tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath)))
+ toBeFormattedFiles := make(map[string]struct{})
- tmpOutFile, err := os.Create(tmpFileName)
- if err != nil {
- return fmt.Errorf("failed to make file %s: %w", tmpFileName, err)
- }
-
- // merge multiple issues per line into one issue
- issuesPerLine := map[int][]result.Issue{}
for i := range issues {
- issue := &issues[i]
- issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue)
- }
+ issue := issues[i]
- issues = issues[:0] // reuse the same memory
- for line, lineIssues := range issuesPerLine {
- if mergedIssue := p.mergeLineIssues(line, lineIssues, origFileLines); mergedIssue != nil {
- issues = append(issues, *mergedIssue)
+ if slices.Contains(formatters, issue.FromLinter) {
+ toBeFormattedFiles[issue.FilePath()] = struct{}{}
+ continue
}
- }
- issues = p.findNotIntersectingIssues(issues)
+ if issue.SuggestedFixes == nil || skipNoTextEdit(&issue) {
+ notFixableIssues = append(notFixableIssues, issue)
+ continue
+ }
- if err = p.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil {
- tmpOutFile.Close()
- _ = robustio.RemoveAll(tmpOutFile.Name())
- return err
+ for _, sf := range issue.SuggestedFixes {
+ for _, edit := range sf.TextEdits {
+ start, end := edit.Pos, edit.End
+ if start > end {
+ return nil, fmt.Errorf("%q suggests invalid fix: pos (%v) > end (%v)",
+ issue.FromLinter, edit.Pos, edit.End)
+ }
+
+ edit := diff.Edit{
+ Start: int(start),
+ End: int(end),
+ New: string(edit.NewText),
+ }
+
+ if _, ok := editsByLinter[issue.FilePath()]; !ok {
+ editsByLinter[issue.FilePath()] = make(map[string][]diff.Edit)
+ }
+
+ editsByLinter[issue.FilePath()][issue.FromLinter] = append(editsByLinter[issue.FilePath()][issue.FromLinter], edit)
+ }
+ }
}
- tmpOutFile.Close()
+ // Validate and group the edits to each actual file.
+ editsByPath := make(map[string][]diff.Edit)
+ for path, linterToEdits := range editsByLinter {
+ excludedLinters := make(map[string]struct{})
- if err = robustio.Rename(tmpOutFile.Name(), filePath); err != nil {
- _ = robustio.RemoveAll(tmpOutFile.Name())
- return fmt.Errorf("failed to rename %s -> %s: %w", tmpOutFile.Name(), filePath, err)
- }
+ linters := maps.Keys(linterToEdits)
- return nil
-}
+ // Does any linter create conflicting edits?
+ for _, linter := range linters {
+ edits := linterToEdits[linter]
+ if _, invalid := validateEdits(edits); invalid > 0 {
+ name, x, y := linter, edits[invalid-1], edits[invalid]
+ excludedLinters[name] = struct{}{}
-func (p Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileLines [][]byte) *result.Issue {
- origLine := origFileLines[lineNum-1] // lineNum is 1-based
+ err := diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y})
+ // TODO(ldez) TUI?
+ p.log.Warnf("Changes related to %q are skipped for the file %q: %v",
+ name, path, err)
+ }
+ }
- if len(lineIssues) == 1 && lineIssues[0].Replacement.Inline == nil {
- return &lineIssues[0]
- }
+ // Does any pair of different linters create edits that conflict?
+ for j := range linters {
+ for k := range linters[:j] {
+ x, y := linters[j], linters[k]
+ if x > y {
+ x, y = y, x
+ }
- // check issues first
- for ind := range lineIssues {
- li := &lineIssues[ind]
+ _, foundX := excludedLinters[x]
+ _, foundY := excludedLinters[y]
+ if foundX || foundY {
+ continue
+ }
- if li.LineRange != nil {
- p.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues)
- return &lineIssues[0]
- }
+ xedits, yedits := linterToEdits[x], linterToEdits[y]
- inline := li.Replacement.Inline
+ combined := slices.Concat(xedits, yedits)
- if inline == nil || len(li.Replacement.NewLines) != 0 || li.Replacement.NeedOnlyDelete {
- p.log.Infof("Line %d has multiple issues but at least one of them isn't inline: %#v", lineNum, lineIssues)
- return li
+ if _, invalid := validateEdits(combined); invalid > 0 {
+ excludedLinters[x] = struct{}{}
+ p.log.Warnf("Changes related to %q are skipped for the file %q due to conflicts with %q.", x, path, y)
+ }
+ }
}
- if inline.StartCol < 0 || inline.Length <= 0 || inline.StartCol+inline.Length > len(origLine) {
- p.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, li, inline)
- return nil
+ var edits []diff.Edit
+ for linter := range linterToEdits {
+ if _, found := excludedLinters[linter]; !found {
+ edits = append(edits, linterToEdits[linter]...)
+ }
}
- }
- return p.applyInlineFixes(lineIssues, origLine, lineNum)
-}
+ editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated.
+ }
-func (p Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, lineNum int) *result.Issue {
- sort.Slice(lineIssues, func(i, j int) bool {
- return lineIssues[i].Replacement.Inline.StartCol < lineIssues[j].Replacement.Inline.StartCol
- })
+ var editError error
- var newLineBuf bytes.Buffer
- newLineBuf.Grow(len(origLine))
+ var formattedFiles []string
- //nolint:misspell // misspelling is intentional
- // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because"
+ // Now we've got a set of valid edits for each file. Apply them.
+ for path, edits := range editsByPath {
+ contents, err := p.fileCache.GetFileBytes(path)
+ if err != nil {
+ editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err))
+ continue
+ }
- curOrigLinePos := 0
- for i := range lineIssues {
- fix := lineIssues[i].Replacement.Inline
- if fix.StartCol < curOrigLinePos {
- p.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues)
- return nil
+ out, err := diff.ApplyBytes(contents, edits)
+ if err != nil {
+ editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err))
+ continue
}
- if curOrigLinePos != fix.StartCol {
- newLineBuf.Write(origLine[curOrigLinePos:fix.StartCol])
+ // Try to format the file.
+ out = p.formatter.Format(path, out)
+
+ if err := os.WriteFile(path, out, filePerm); err != nil {
+ editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err))
+ continue
}
- newLineBuf.WriteString(fix.NewString)
- curOrigLinePos = fix.StartCol + fix.Length
- }
- if curOrigLinePos != len(origLine) {
- newLineBuf.Write(origLine[curOrigLinePos:])
+
+ formattedFiles = append(formattedFiles, path)
}
- mergedIssue := lineIssues[0] // use text from the first issue (it's not really used)
- mergedIssue.Replacement = &result.Replacement{
- NewLines: []string{newLineBuf.String()},
+ for path := range toBeFormattedFiles {
+ // Skips files already formatted by the previous fix step.
+ if !slices.Contains(formattedFiles, path) {
+ content, err := p.fileCache.GetFileBytes(path)
+ if err != nil {
+ p.log.Warnf("Error reading file %s: %v", path, err)
+ continue
+ }
+
+ out := p.formatter.Format(path, content)
+
+ if err := os.WriteFile(path, out, filePerm); err != nil {
+ editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err))
+ continue
+ }
+ }
}
- return &mergedIssue
+
+ return notFixableIssues, editError
}
-func (p Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue {
- sort.SliceStable(issues, func(i, j int) bool {
- a, b := issues[i], issues[j]
- return a.Line() < b.Line()
- })
+func (Fixer) Finish() {}
- var ret []result.Issue
- var currentEnd int
- for i := range issues {
- issue := &issues[i]
- rng := issue.GetLineRange()
- if rng.From <= currentEnd {
- p.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd)
- continue // skip intersecting issue
+func (p Fixer) printStat() {
+ p.sw.PrintStages()
+}
+
+func skipNoTextEdit(issue *result.Issue) bool {
+ var onlyMessage int
+ for _, sf := range issue.SuggestedFixes {
+ if len(sf.TextEdits) == 0 {
+ onlyMessage++
}
- p.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange())
- ret = append(ret, *issue)
- currentEnd = rng.To
}
- return ret
+ return len(issue.SuggestedFixes) == onlyMessage
}
-func (p Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmpOutFile *os.File) error {
- // issues aren't intersecting
+// validateEdits returns a list of edits that is sorted and
+// contains no duplicate edits. Returns the index of some
+// overlapping adjacent edits if there is one and <0 if the
+// edits are valid.
+//
+//nolint:gocritic // Copy of go/analysis/internal/checker/checker.go
+func validateEdits(edits []diff.Edit) ([]diff.Edit, int) {
+ if len(edits) == 0 {
+ return nil, -1
+ }
- nextIssueIndex := 0
- for i := 0; i < len(origFileLines); i++ {
- var outLine string
- var nextIssue *result.Issue
- if nextIssueIndex != len(issues) {
- nextIssue = &issues[nextIssueIndex]
- }
+ equivalent := func(x, y diff.Edit) bool {
+ return x.Start == y.Start && x.End == y.End && x.New == y.New
+ }
- origFileLineNumber := i + 1
- if nextIssue == nil || origFileLineNumber != nextIssue.GetLineRange().From {
- outLine = string(origFileLines[i])
- } else {
- nextIssueIndex++
- rng := nextIssue.GetLineRange()
- if rng.From > rng.To {
- // Maybe better decision is to skip such issues, re-evaluate if regressed.
- p.log.Warnf("[fixer]: issue line range is probably invalid, fix can be incorrect (from=%d, to=%d, linter=%s)",
- rng.From, rng.To, nextIssue.FromLinter,
- )
- }
- i += rng.To - rng.From
- if nextIssue.Replacement.NeedOnlyDelete {
- continue
+ diff.SortEdits(edits)
+
+ unique := []diff.Edit{edits[0]}
+
+ invalid := -1
+
+ for i := 1; i < len(edits); i++ {
+ prev, cur := edits[i-1], edits[i]
+ // We skip over equivalent edits without considering them
+ // an error. This handles identical edits coming from the
+ // multiple ways of loading a package into a
+ // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]".
+ if !equivalent(prev, cur) {
+ unique = append(unique, cur)
+ if prev.End > cur.Start {
+ invalid = i
}
- outLine = strings.Join(nextIssue.Replacement.NewLines, "\n")
}
+ }
+ return unique, invalid
+}
- if i < len(origFileLines)-1 {
- outLine += "\n"
- }
- if _, err := tmpOutFile.WriteString(outLine); err != nil {
- return fmt.Errorf("failed to write output line: %w", err)
- }
+// diff3Conflict returns an error describing two conflicting sets of
+// edits on a file at path.
+// Copy of go/analysis/internal/checker/checker.go
+func diff3Conflict(path, xlabel, ylabel string, xedits, yedits []diff.Edit) error {
+ contents, err := os.ReadFile(path)
+ if err != nil {
+ return err
}
+ oldlabel, old := "base", string(contents)
- return nil
-}
+ xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines)
+ if err != nil {
+ return err
+ }
+ ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines)
+ if err != nil {
+ return err
+ }
-func (p Fixer) printStat() {
- p.sw.PrintStages()
+ return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s",
+ xlabel, ylabel, path, xdiff, ydiff)
}
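
Note: the heart of the new fixer is `validateEdits`: sort the edits, drop exact duplicates, and flag any adjacent pair whose ranges overlap, so a linter producing conflicting fixes can be excluded for that file. A simplified, self-contained sketch of the same scheme (local `Edit` type; the vendored code records the invalid index instead of returning early):

```go
package main

import (
	"fmt"
	"sort"
)

// Edit mirrors the shape used by the vendored diff package: the
// half-open byte range [Start, End) is replaced by New.
type Edit struct {
	Start, End int
	New        string
}

// validate sorts edits, skips exact duplicates, and reports the index of
// the first overlapping pair (-1 if the set is conflict-free).
func validate(edits []Edit) ([]Edit, int) {
	if len(edits) == 0 {
		return nil, -1
	}
	sort.Slice(edits, func(i, j int) bool {
		if edits[i].Start != edits[j].Start {
			return edits[i].Start < edits[j].Start
		}
		return edits[i].End < edits[j].End
	})
	unique := []Edit{edits[0]}
	for i := 1; i < len(edits); i++ {
		prev, cur := edits[i-1], edits[i]
		if prev == cur {
			continue // identical edits from different package loads are harmless
		}
		unique = append(unique, cur)
		if prev.End > cur.Start {
			return unique, i // overlapping, conflicting edits
		}
	}
	return unique, -1
}

func main() {
	_, bad := validate([]Edit{{0, 4, "a"}, {2, 6, "b"}})
	fmt.Println(bad) // 1: the pair overlaps, so those fixes would be skipped
}
```
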
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go
index 0680c3f296..690cdf3f8a 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go
@@ -34,7 +34,7 @@ func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) {
}
return filterIssuesUnsafe(issues, func(issue *result.Issue) bool {
- if issue.Replacement != nil && p.cfg.Issues.NeedFix {
+ if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix {
// we need to fix all issues at once => we need to return all of them
return true
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go
index 1647cace09..0d1c286282 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go
@@ -36,7 +36,7 @@ func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) {
}
return filterIssuesUnsafe(issues, func(issue *result.Issue) bool {
- if issue.Replacement != nil && p.cfg.Issues.NeedFix {
+ if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix {
// we need to fix all issues at once => we need to return all of them
return true
}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go
index 93a26586d6..fcd65326f9 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go
@@ -1,6 +1,7 @@
package processors
import (
+ "cmp"
"regexp"
"github.com/golangci/golangci-lint/pkg/config"
@@ -67,10 +68,7 @@ func (p *Severity) transform(issue *result.Issue) *result.Issue {
return issue
}
- issue.Severity = rule.severity
- if issue.Severity == "" {
- issue.Severity = p.defaultSeverity
- }
+ issue.Severity = cmp.Or(rule.severity, p.defaultSeverity)
return issue
}
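
Note: `cmp.Or` (Go 1.22) returns its first non-zero argument, which collapses the old assign-then-fallback into one line. For illustration:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	// An empty rule severity falls through to the default, as above.
	fmt.Println(cmp.Or("", "warning"))      // warning
	fmt.Println(cmp.Or("error", "warning")) // error
}
```
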
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go
index 4da73c72ea..7eebea6315 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go
@@ -1,10 +1,9 @@
package processors
import (
- "errors"
+ "cmp"
"fmt"
"slices"
- "sort"
"strings"
"github.com/golangci/golangci-lint/pkg/config"
@@ -22,24 +21,32 @@ const (
orderNameSeverity = "severity"
)
+const (
+ less = iota - 1
+ equal
+ greater
+)
+
var _ Processor = (*SortResults)(nil)
+type issueComparator func(a, b *result.Issue) int
+
type SortResults struct {
- cmps map[string]*comparator
+ cmps map[string][]issueComparator
cfg *config.Output
}
func NewSortResults(cfg *config.Config) *SortResults {
return &SortResults{
- cmps: map[string]*comparator{
+ cmps: map[string][]issueComparator{
// For sorting we are comparing (in next order):
// file names, line numbers, position, and finally - giving up.
- orderNameFile: byFileName().SetNext(byLine().SetNext(byColumn())),
+ orderNameFile: {byFileName, byLine, byColumn},
// For sorting we are comparing: linter name
- orderNameLinter: byLinter(),
+ orderNameLinter: {byLinter},
// For sorting we are comparing: severity
- orderNameSeverity: bySeverity(),
+ orderNameSeverity: {bySeverity},
},
cfg: &cfg.Output,
}
@@ -57,23 +64,21 @@ func (p SortResults) Process(issues []result.Issue) ([]result.Issue, error) {
p.cfg.SortOrder = []string{orderNameFile}
}
- var cmps []*comparator
+ var cmps []issueComparator
+
for _, name := range p.cfg.SortOrder {
c, ok := p.cmps[name]
if !ok {
return nil, fmt.Errorf("unsupported sort-order name %q", name)
}
- cmps = append(cmps, c)
+ cmps = append(cmps, c...)
}
- cmp, err := mergeComparators(cmps)
- if err != nil {
- return nil, err
- }
+ comp := mergeComparators(cmps...)
- sort.Slice(issues, func(i, j int) bool {
- return cmp.Compare(&issues[i], &issues[j]) == less
+ slices.SortFunc(issues, func(a, b result.Issue) int {
+ return comp(&a, &b)
})
return issues, nil
@@ -81,147 +86,32 @@ func (p SortResults) Process(issues []result.Issue) ([]result.Issue, error) {
func (SortResults) Finish() {}
-type compareResult int
-
-const (
- less compareResult = iota - 1
- equal
- greater
- none
-)
-
-func (c compareResult) isNeutral() bool {
- // return true if compare result is incomparable or equal.
- return c == none || c == equal
-}
-
-func (c compareResult) String() string {
- switch c {
- case less:
- return "less"
- case equal:
- return "equal"
- case greater:
- return "greater"
- default:
- return "none"
- }
-}
-
-// comparator describes how to implement compare for two "issues".
-type comparator struct {
- name string
- compare func(a, b *result.Issue) compareResult
- next *comparator
-}
-
-func (cmp *comparator) Next() *comparator { return cmp.next }
-
-func (cmp *comparator) SetNext(c *comparator) *comparator {
- cmp.next = c
- return cmp
+func byFileName(a, b *result.Issue) int {
+ return strings.Compare(a.FilePath(), b.FilePath())
}
-func (cmp *comparator) String() string {
- s := cmp.name
- if cmp.Next() != nil {
- s += " > " + cmp.Next().String()
- }
-
- return s
+func byLine(a, b *result.Issue) int {
+ return numericCompare(a.Line(), b.Line())
}
-func (cmp *comparator) Compare(a, b *result.Issue) compareResult {
- res := cmp.compare(a, b)
- if !res.isNeutral() {
- return res
- }
-
- if next := cmp.Next(); next != nil {
- return next.Compare(a, b)
- }
-
- return res
+func byColumn(a, b *result.Issue) int {
+ return numericCompare(a.Column(), b.Column())
}
-func byFileName() *comparator {
- return &comparator{
- name: "byFileName",
- compare: func(a, b *result.Issue) compareResult {
- return compareResult(strings.Compare(a.FilePath(), b.FilePath()))
- },
- }
+func byLinter(a, b *result.Issue) int {
+ return strings.Compare(a.FromLinter, b.FromLinter)
}
-func byLine() *comparator {
- return &comparator{
- name: "byLine",
- compare: func(a, b *result.Issue) compareResult {
- return numericCompare(a.Line(), b.Line())
- },
- }
+func bySeverity(a, b *result.Issue) int {
+ return severityCompare(a.Severity, b.Severity)
}
-func byColumn() *comparator {
- return &comparator{
- name: "byColumn",
- compare: func(a, b *result.Issue) compareResult {
- return numericCompare(a.Column(), b.Column())
- },
- }
-}
-
-func byLinter() *comparator {
- return &comparator{
- name: "byLinter",
- compare: func(a, b *result.Issue) compareResult {
- return compareResult(strings.Compare(a.FromLinter, b.FromLinter))
- },
- }
-}
-
-func bySeverity() *comparator {
- return &comparator{
- name: "bySeverity",
- compare: func(a, b *result.Issue) compareResult {
- return severityCompare(a.Severity, b.Severity)
- },
- }
-}
-
-func mergeComparators(cmps []*comparator) (*comparator, error) {
- if len(cmps) == 0 {
- return nil, errors.New("no comparator")
- }
-
- for i := range len(cmps) - 1 {
- findComparatorTip(cmps[i]).SetNext(cmps[i+1])
- }
-
- return cmps[0], nil
-}
-
-func findComparatorTip(cmp *comparator) *comparator {
- if cmp.Next() != nil {
- return findComparatorTip(cmp.Next())
- }
-
- return cmp
-}
-
-func severityCompare(a, b string) compareResult {
+func severityCompare(a, b string) int {
// The position inside the slice defines the importance (lower to higher).
classic := []string{"low", "medium", "high", "warning", "error"}
if slices.Contains(classic, a) && slices.Contains(classic, b) {
- switch {
- case slices.Index(classic, a) > slices.Index(classic, b):
- return greater
- case slices.Index(classic, a) < slices.Index(classic, b):
- return less
- default:
- return equal
- }
+ return cmp.Compare(slices.Index(classic, a), slices.Index(classic, b))
}
if slices.Contains(classic, a) {
@@ -232,28 +122,27 @@ func severityCompare(a, b string) compareResult {
return less
}
- return compareResult(strings.Compare(a, b))
+ return strings.Compare(a, b)
}
-func numericCompare(a, b int) compareResult {
- var (
- isValuesInvalid = a < 0 || b < 0
- isZeroValuesBoth = a == 0 && b == 0
- isEqual = a == b
- isZeroValueInA = b > 0 && a == 0
- isZeroValueInB = a > 0 && b == 0
- )
-
- switch {
- case isZeroValuesBoth || isEqual:
+func numericCompare(a, b int) int {
+ // Negative values and zeros are skipped (equal) because they are either invalid or "neutral" (default int value).
+ if a <= 0 || b <= 0 {
return equal
- case isValuesInvalid || isZeroValueInA || isZeroValueInB:
- return none
- case a > b:
- return greater
- case a < b:
- return less
}
- return equal
+ return cmp.Compare(a, b)
+}
+
+func mergeComparators(comps ...issueComparator) issueComparator {
+ return func(a, b *result.Issue) int {
+ for _, comp := range comps {
+ i := comp(a, b)
+ if i != equal {
+ return i
+ }
+ }
+
+ return equal
+ }
}
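
Note: the linked-list `comparator` machinery is replaced by plain `func(a, b *result.Issue) int` comparators composed left-to-right and fed to `slices.SortFunc`, the standard Go 1.21+ idiom. A self-contained sketch of the same composition with a local type (`issue` and `merge` are illustrative stand-ins):

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type issue struct {
	File string
	Line int
}

// merge composes comparators exactly like mergeComparators above:
// the first non-equal result wins.
func merge[T any](comps ...func(a, b T) int) func(a, b T) int {
	return func(a, b T) int {
		for _, comp := range comps {
			if c := comp(a, b); c != 0 {
				return c
			}
		}
		return 0
	}
}

func main() {
	issues := []issue{{"b.go", 3}, {"a.go", 9}, {"a.go", 2}}
	byFile := func(a, b issue) int { return cmp.Compare(a.File, b.File) }
	byLine := func(a, b issue) int { return cmp.Compare(a.Line, b.Line) }

	slices.SortFunc(issues, merge(byFile, byLine))
	fmt.Println(issues) // [{a.go 2} {a.go 9} {b.go 3}]
}
```
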
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go
index 115196d9a7..ec134f25f6 100644
--- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go
@@ -26,7 +26,7 @@ func (*UniqByLine) Name() string {
}
func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) {
- if !p.cfg.Output.UniqByLine {
+ if !p.cfg.Issues.UniqByLine {
return issues, nil
}
@@ -36,7 +36,7 @@ func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) {
func (*UniqByLine) Finish() {}
func (p *UniqByLine) shouldPassIssue(issue *result.Issue) bool {
- if issue.Replacement != nil && p.cfg.Issues.NeedFix {
+ if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix {
// if issue will be auto-fixed we shouldn't collapse issues:
// e.g. one line can contain 2 misspellings, they will be in 2 issues and misspell should fix both of them.
return true
diff --git a/hack/tools/vendor/github.com/golangci/modinfo/.gitignore b/hack/tools/vendor/github.com/golangci/modinfo/.gitignore
deleted file mode 100644
index 9f11b755a1..0000000000
--- a/hack/tools/vendor/github.com/golangci/modinfo/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.idea/
diff --git a/hack/tools/vendor/github.com/golangci/modinfo/.golangci.yml b/hack/tools/vendor/github.com/golangci/modinfo/.golangci.yml
deleted file mode 100644
index 9698182f2a..0000000000
--- a/hack/tools/vendor/github.com/golangci/modinfo/.golangci.yml
+++ /dev/null
@@ -1,157 +0,0 @@
-run:
- timeout: 7m
-
-linters-settings:
- govet:
- enable:
- - shadow
- gocyclo:
- min-complexity: 12
- goconst:
- min-len: 3
- min-occurrences: 3
- funlen:
- lines: -1
- statements: 50
- misspell:
- locale: US
- depguard:
- rules:
- main:
- deny:
- - pkg: "github.com/instana/testify"
- desc: not allowed
- - pkg: "github.com/pkg/errors"
- desc: Should be replaced by standard lib errors package
- tagalign:
- align: false
- order:
- - xml
- - json
- - yaml
- - yml
- - toml
- - mapstructure
- - url
- godox:
- keywords:
- - FIXME
- gocritic:
- enabled-tags:
- - diagnostic
- - style
- - performance
- disabled-checks:
- - paramTypeCombine # already handle by gofumpt.extra-rules
- - whyNoLint # already handle by nonolint
- - unnamedResult
- - hugeParam
- - sloppyReassign
- - rangeValCopy
- - octalLiteral
- - ptrToRefParam
- - appendAssign
- - ruleguard
- - httpNoBody
- - exposedSyncMutex
- revive:
- rules:
- - name: struct-tag
- - name: blank-imports
- - name: context-as-argument
- - name: context-keys-type
- - name: dot-imports
- - name: error-return
- - name: error-strings
- - name: error-naming
- - name: exported
- disabled: true
- - name: if-return
- - name: increment-decrement
- - name: var-naming
- - name: var-declaration
- - name: package-comments
- disabled: true
- - name: range
- - name: receiver-naming
- - name: time-naming
- - name: unexported-return
- - name: indent-error-flow
- - name: errorf
- - name: empty-block
- - name: superfluous-else
- - name: unused-parameter
- disabled: true
- - name: unreachable-code
- - name: redefines-builtin-id
-
- tagliatelle:
- case:
- rules:
- json: pascal
- yaml: camel
- xml: camel
- header: header
- mapstructure: camel
- env: upperSnake
- envconfig: upperSnake
-
-linters:
- enable-all: true
- disable:
- - deadcode # deprecated
- - exhaustivestruct # deprecated
- - golint # deprecated
- - ifshort # deprecated
- - interfacer # deprecated
- - maligned # deprecated
- - nosnakecase # deprecated
- - scopelint # deprecated
- - structcheck # deprecated
- - varcheck # deprecated
- - cyclop # duplicate of gocyclo
- - sqlclosecheck # not relevant (SQL)
- - rowserrcheck # not relevant (SQL)
- - execinquery # not relevant (SQL)
- - lll
- - gosec
- - dupl # not relevant
- - prealloc # too many false-positive
- - bodyclose # too many false-positive
- - gomnd
- - testpackage # not relevant
- - tparallel # not relevant
- - paralleltest # not relevant
- - nestif # too many false-positive
- - wrapcheck
- - goerr113 # not relevant
- - nlreturn # not relevant
- - wsl # not relevant
- - exhaustive # not relevant
- - exhaustruct # not relevant
- - makezero # not relevant
- - forbidigo
- - varnamelen # not relevant
- - nilnil # not relevant
- - ireturn # not relevant
- - contextcheck # too many false-positive
- - tenv # we already have a test "framework" to handle env vars
- - noctx
- - errchkjson
- - nonamedreturns
- - gosmopolitan # not relevant
- - gochecknoglobals
-
-issues:
- exclude-use-default: false
- max-issues-per-linter: 0
- max-same-issues: 0
- exclude:
- - 'Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked'
- - 'ST1000: at least one file in a package should have a package comment'
- exclude-rules:
- - path: (.+)_test.go
- linters:
- - funlen
- - goconst
- - maintidx
diff --git a/hack/tools/vendor/github.com/golangci/modinfo/LICENSE b/hack/tools/vendor/github.com/golangci/modinfo/LICENSE
deleted file mode 100644
index f288702d2f..0000000000
--- a/hack/tools/vendor/github.com/golangci/modinfo/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (C) <year> <name of author>
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/hack/tools/vendor/github.com/golangci/modinfo/Makefile b/hack/tools/vendor/github.com/golangci/modinfo/Makefile
deleted file mode 100644
index df91018f11..0000000000
--- a/hack/tools/vendor/github.com/golangci/modinfo/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-.PHONY: clean check test
-
-default: clean check test
-
-clean:
- rm -rf dist/ cover.out
-
-test: clean
- go test -v -cover ./...
-
-check:
- golangci-lint run
diff --git a/hack/tools/vendor/github.com/golangci/modinfo/module.go b/hack/tools/vendor/github.com/golangci/modinfo/module.go
deleted file mode 100644
index ff0b21b9b8..0000000000
--- a/hack/tools/vendor/github.com/golangci/modinfo/module.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package modinfo
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "reflect"
- "sort"
- "strings"
- "sync"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/go/analysis"
-)
-
-type ModInfo struct {
- Path string `json:"Path"`
- Dir string `json:"Dir"`
- GoMod string `json:"GoMod"`
- GoVersion string `json:"GoVersion"`
- Main bool `json:"Main"`
-}
-
-var (
- once sync.Once
- information []ModInfo
- errInfo error
-)
-
-var Analyzer = &analysis.Analyzer{
- Name: "modinfo",
- Doc: "Module information",
- URL: "https://github.com/golangci/modinfo",
- Run: runOnce,
- ResultType: reflect.TypeOf([]ModInfo(nil)),
-}
-
-func runOnce(pass *analysis.Pass) (any, error) {
- _, ok := os.LookupEnv("MODINFO_DEBUG_DISABLE_ONCE")
- if ok {
- return GetModuleInfo(pass)
- }
-
- once.Do(func() {
- information, errInfo = GetModuleInfo(pass)
- })
-
- return information, errInfo
-}
-
-// GetModuleInfo gets modules information.
-// Always returns 1 element except for workspace (returns all the modules of the workspace).
-// Based on `go list -m -json` behavior.
-func GetModuleInfo(pass *analysis.Pass) ([]ModInfo, error) {
- // https://github.com/golang/go/issues/44753#issuecomment-790089020
- cmd := exec.Command("go", "list", "-m", "-json")
- for _, file := range pass.Files {
- name := pass.Fset.File(file.Pos()).Name()
- if filepath.Ext(name) != ".go" {
- continue
- }
-
- cmd.Dir = filepath.Dir(name)
- break
- }
-
- out, err := cmd.Output()
- if err != nil {
- return nil, fmt.Errorf("command go list: %w: %s", err, string(out))
- }
-
- var infos []ModInfo
-
- for dec := json.NewDecoder(bytes.NewBuffer(out)); dec.More(); {
- var v ModInfo
- if err := dec.Decode(&v); err != nil {
- return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(out))
- }
-
- if v.GoMod == "" {
- return nil, errors.New("working directory is not part of a module")
- }
-
- if !v.Main || v.Dir == "" {
- continue
- }
-
- infos = append(infos, v)
- }
-
- if len(infos) == 0 {
- return nil, errors.New("go.mod file not found")
- }
-
- sort.Slice(infos, func(i, j int) bool {
- return len(infos[i].Path) > len(infos[j].Path)
- })
-
- return infos, nil
-}
-
-// FindModuleFromPass finds the module related to the files of the pass.
-func FindModuleFromPass(pass *analysis.Pass) (ModInfo, error) {
- infos, ok := pass.ResultOf[Analyzer].([]ModInfo)
- if !ok {
- return ModInfo{}, errors.New("no modinfo analyzer result")
- }
-
- var name string
- for _, file := range pass.Files {
- f := pass.Fset.File(file.Pos()).Name()
- if filepath.Ext(f) != ".go" {
- continue
- }
-
- name = f
- break
- }
-
- // no Go file found in analysis pass
- if name == "" {
- name, _ = os.Getwd()
- }
-
- for _, info := range infos {
- if !strings.HasPrefix(name, info.Dir) {
- continue
- }
- return info, nil
- }
-
- return ModInfo{}, errors.New("module information not found")
-}
-
-// ReadModuleFileFromPass read the `go.mod` file from the pass result.
-func ReadModuleFileFromPass(pass *analysis.Pass) (*modfile.File, error) {
- info, err := FindModuleFromPass(pass)
- if err != nil {
- return nil, err
- }
-
- return ReadModuleFile(info)
-}
-
-// ReadModuleFile read the `go.mod` file.
-func ReadModuleFile(info ModInfo) (*modfile.File, error) {
- raw, err := os.ReadFile(info.GoMod)
- if err != nil {
- return nil, fmt.Errorf("reading go.mod file: %w", err)
- }
-
- return modfile.Parse("go.mod", raw, nil)
-}
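
The deleted `GetModuleInfo` shells out to `go list -m -json`, which prints one JSON object per module (several in a workspace), concatenated rather than wrapped in an array, so it must be decoded as a stream. A minimal standalone sketch of that decoding loop, reusing the removed file's `ModInfo` field names:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
)

// ModInfo mirrors the fields of the deleted module.go.
type ModInfo struct {
	Path      string
	Dir       string
	GoMod     string
	GoVersion string
	Main      bool
}

func listModules() ([]ModInfo, error) {
	out, err := exec.Command("go", "list", "-m", "-json").Output()
	if err != nil {
		return nil, fmt.Errorf("go list -m -json: %w", err)
	}

	var infos []ModInfo
	// json.Decoder.More reports whether another top-level value follows,
	// which handles the stream of concatenated objects.
	for dec := json.NewDecoder(bytes.NewReader(out)); dec.More(); {
		var v ModInfo
		if err := dec.Decode(&v); err != nil {
			return nil, err
		}
		infos = append(infos, v)
	}

	return infos, nil
}

func main() {
	infos, err := listModules()
	if err != nil {
		panic(err)
	}
	for _, info := range infos {
		fmt.Println(info.Path, "->", info.GoMod)
	}
}
```
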
diff --git a/hack/tools/vendor/github.com/golangci/modinfo/readme.md b/hack/tools/vendor/github.com/golangci/modinfo/readme.md
deleted file mode 100644
index 2175de8eb4..0000000000
--- a/hack/tools/vendor/github.com/golangci/modinfo/readme.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# modinfo
-
-This module contains:
-- an analyzer that returns module information.
-- methods to find and read `go.mod` file
-
-## Examples
-
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/golangci/modinfo"
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
-)
-
-var Analyzer = &analysis.Analyzer{
- Name: "example",
- Doc: "Example",
- Run: func(pass *analysis.Pass) (interface{}, error) {
- file, err := modinfo.ReadModuleFileFromPass(pass)
- if err != nil {
- return nil, err
- }
-
- fmt.Println("go.mod", file)
-
- // TODO
-
- return nil, nil
- },
- Requires: []*analysis.Analyzer{
- inspect.Analyzer,
- modinfo.Analyzer,
- },
-}
-```
-
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/golangci/modinfo"
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
-)
-
-var Analyzer = &analysis.Analyzer{
- Name: "example",
- Doc: "Example",
- Run: func(pass *analysis.Pass) (interface{}, error) {
- info, err := modinfo.FindModuleFromPass(pass)
- if err != nil {
- return nil, err
- }
-
- fmt.Println("Module", info.Dir)
-
- // TODO
-
- return nil, nil
- },
- Requires: []*analysis.Analyzer{
- inspect.Analyzer,
- modinfo.Analyzer,
- },
-}
-```
diff --git a/hack/tools/vendor/github.com/google/cel-go/LICENSE b/hack/tools/vendor/github.com/google/cel-go/LICENSE
new file mode 100644
index 0000000000..2493ed2eb4
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/LICENSE
@@ -0,0 +1,233 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+===========================================================================
+The common/types/pb/equal.go modification of proto.Equal logic
+===========================================================================
+Copyright (c) 2018 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/cel/BUILD.bazel
new file mode 100644
index 0000000000..81549fb4c5
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -0,0 +1,91 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cel.go",
+ "decls.go",
+ "env.go",
+ "folding.go",
+ "io.go",
+ "inlining.go",
+ "library.go",
+ "macro.go",
+ "optimizer.go",
+ "options.go",
+ "program.go",
+ "validator.go",
+ ],
+ importpath = "github.com/google/cel-go/cel",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//checker:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//interpreter:go_default_library",
+ "//parser:go_default_library",
+ "@dev_cel_expr//:expr",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
+ "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "cel_example_test.go",
+ "cel_test.go",
+ "decls_test.go",
+ "env_test.go",
+ "folding_test.go",
+ "io_test.go",
+ "inlining_test.go",
+ "optimizer_test.go",
+ "validator_test.go",
+ ],
+ data = [
+ "//cel/testdata:gen_test_fds",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//ext:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//encoding/prototext:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/cel.go b/hack/tools/vendor/github.com/google/cel-go/cel/cel.go
new file mode 100644
index 0000000000..eb5a9f4cc5
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/cel.go
@@ -0,0 +1,19 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cel defines the top-level interface for the Common Expression Language (CEL).
+//
+// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
+// expressions against user-defined environments.
+package cel
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/decls.go b/hack/tools/vendor/github.com/google/cel-go/cel/decls.go
new file mode 100644
index 0000000000..4188060210
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/decls.go
@@ -0,0 +1,370 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind which is used to differentiate quickly between simple and complex types.
+type Kind = types.Kind
+
+const (
+ // DynKind represents a dynamic type. This kind only exists at type-check time.
+ DynKind Kind = types.DynKind
+
+ // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+ AnyKind = types.AnyKind
+
+ // BoolKind represents a boolean type.
+ BoolKind = types.BoolKind
+
+ // BytesKind represents a bytes type.
+ BytesKind = types.BytesKind
+
+ // DoubleKind represents a double type.
+ DoubleKind = types.DoubleKind
+
+ // DurationKind represents a CEL duration type.
+ DurationKind = types.DurationKind
+
+ // IntKind represents an integer type.
+ IntKind = types.IntKind
+
+ // ListKind represents a list type.
+ ListKind = types.ListKind
+
+ // MapKind represents a map type.
+ MapKind = types.MapKind
+
+ // NullTypeKind represents a null type.
+ NullTypeKind = types.NullTypeKind
+
+ // OpaqueKind represents an abstract type which has no accessible fields.
+ OpaqueKind = types.OpaqueKind
+
+ // StringKind represents a string type.
+ StringKind = types.StringKind
+
+ // StructKind represents a structured object with typed fields.
+ StructKind = types.StructKind
+
+ // TimestampKind represents a CEL time type.
+ TimestampKind = types.TimestampKind
+
+ // TypeKind represents the CEL type.
+ TypeKind = types.TypeKind
+
+ // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+ TypeParamKind = types.TypeParamKind
+
+ // UintKind represents a uint type.
+ UintKind = types.UintKind
+)
+
+var (
+ // AnyType represents the google.protobuf.Any type.
+ AnyType = types.AnyType
+ // BoolType represents the bool type.
+ BoolType = types.BoolType
+ // BytesType represents the bytes type.
+ BytesType = types.BytesType
+ // DoubleType represents the double type.
+ DoubleType = types.DoubleType
+ // DurationType represents the CEL duration type.
+ DurationType = types.DurationType
+ // DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+ DynType = types.DynType
+ // IntType represents the int type.
+ IntType = types.IntType
+ // NullType represents the type of a null value.
+ NullType = types.NullType
+ // StringType represents the string type.
+ StringType = types.StringType
+ // TimestampType represents the time type.
+ TimestampType = types.TimestampType
+ // TypeType represents a CEL type.
+ TypeType = types.TypeType
+ // UintType represents a uint type.
+ UintType = types.UintType
+
+ // function references for instantiating new types.
+
+ // ListType creates an instance of a list type value with the provided element type.
+ ListType = types.NewListType
+ // MapType creates an instance of a map type value with the provided key and value types.
+ MapType = types.NewMapType
+ // NullableType creates an instance of a nullable type with the provided wrapped type.
+ //
+ // Note: only primitive types are supported as wrapped types.
+ NullableType = types.NewNullableType
+ // OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+ OptionalType = types.NewOptionalType
+ // OpaqueType creates an abstract parameterized type with a given name.
+ OpaqueType = types.NewOpaqueType
+ // ObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+ ObjectType = types.NewObjectType
+ // TypeParamType creates a parameterized type instance.
+ TypeParamType = types.NewTypeParamType
+)
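+
+// A minimal usage sketch of the type constructors above, as seen from a
+// client package (the variable names and the message type are illustrative
+// assumptions):
+//
+//	strList := cel.ListType(cel.StringType)               // list(string)
+//	strDynMap := cel.MapType(cel.StringType, cel.DynType) // map(string, dyn)
+//	optInt := cel.OptionalType(cel.IntType)               // optional(int)
+//	status := cel.ObjectType("google.rpc.Status")         // protobuf message type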
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type = types.Type
+
+// Constant creates an instance of an identifier declaration with a variable name, type, and value.
+func Constant(name string, t *Type, v ref.Val) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, decls.NewConstant(name, t, v))
+ return e, nil
+ }
+}
+
+// Variable creates an instance of a variable declaration with a variable name and type.
+func Variable(name string, t *Type) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, decls.NewVariable(name, t))
+ return e, nil
+ }
+}
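+
+// A minimal sketch combining Constant and Variable (the names and values are
+// illustrative assumptions):
+//
+//	env, err := cel.NewEnv(
+//	    cel.Constant("pi", cel.DoubleType, types.Double(3.14159)),
+//	    cel.Variable("radius", cel.DoubleType))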
+
+// Function defines a function and overloads with optional singleton or per-overload bindings.
+//
+// Using Function is roughly equivalent to calling Declarations() to declare the function signatures
+// and Functions() to define the function bindings, if they have been defined. Specifying the
+// same function name more than once will result in the aggregation of the function overloads. If any
+// signatures conflict between the existing and new function definitions, an error will be raised.
+// However, if the signatures are identical and the overload ids are the same, the redefinition will
+// be considered a no-op.
+//
+// One key difference with using Function() is that each FunctionDecl provided will handle dynamic
+// dispatch based on the type-signatures of the overloads provided, which means overload resolution at
+// runtime is handled out of the box rather than via a custom binding for overload resolution via
+// Functions():
+//
+// - Overloads are searched in the order they are declared
+// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents
+//   at runtime. Empty lists and maps will result in a 'default dispatch'
+// - In the event that a default dispatch occurs, the first overload provided is the one invoked
+//
+// If you intend to use overloads which differentiate based on the key or element type of a list or
+// map, consider using a generic function instead: e.g. func(list(T)) or func(map(K, V)) as this
+// will allow your implementation to determine how best to handle dispatch and the default behavior
+// for empty lists and maps whose contents cannot be inspected.
+//
+// For functions which use parameterized opaque types (abstract types), consider using a singleton
+// function which is capable of inspecting the contents of the type and resolving the appropriate
+// overload as CEL can only make inferences by type-name regarding such types.
+func Function(name string, opts ...FunctionOpt) EnvOption {
+ return func(e *Env) (*Env, error) {
+ fn, err := decls.NewFunction(name, opts...)
+ if err != nil {
+ return nil, err
+ }
+ if existing, found := e.functions[fn.Name()]; found {
+ fn, err = existing.Merge(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.functions[fn.Name()] = fn
+ return e, nil
+ }
+}
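+
+// A minimal sketch of declaring and binding a function (the "greet" function
+// and its behavior are illustrative assumptions):
+//
+//	env, err := cel.NewEnv(
+//	    cel.Function("greet",
+//	        cel.Overload("greet_string",
+//	            []*cel.Type{cel.StringType}, cel.StringType,
+//	            cel.UnaryBinding(func(arg ref.Val) ref.Val {
+//	                return types.String("hello " + string(arg.(types.String)))
+//	            }))))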
+
+// FunctionOpt defines a functional option for configuring a function declaration.
+type FunctionOpt = decls.FunctionOpt
+
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonUnaryBinding(fn, traits...)
+}
+
+// SingletonBinaryImpl creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonBinaryBinding
+func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonFunctionImpl creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonFunctionBinding
+func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// DisableDeclaration disables the function signatures, effectively removing them from the type-check
+// environment while preserving the runtime bindings.
+func DisableDeclaration(value bool) FunctionOpt {
+ return decls.DisableDeclaration(value)
+}
+
+// Overload defines a new global overload with an overload id, argument types, and result type. Through the
+// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
+// be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+ return decls.Overload(overloadID, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+ return decls.MemberOverload(overloadID, args, resultType, opts...)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt = decls.OverloadOpt
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+ return decls.UnaryBinding(binding)
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+ return decls.BinaryBinding(binding)
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+ return decls.FunctionBinding(binding)
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary, as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+ return decls.OverloadIsNonStrict()
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+ return decls.OverloadOperandTrait(trait)
+}
+
+// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
+func TypeToExprType(t *Type) (*exprpb.Type, error) {
+ return types.TypeToExprType(t)
+}
+
+// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
+func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+ return types.ExprTypeToType(t)
+}
+
+// ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function.
+func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
+ return AlphaProtoAsDeclaration(d)
+}
+
+// AlphaProtoAsDeclaration converts a v1alpha1.Decl value describing a variable or function into an EnvOption.
+func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) {
+ canonical := &celpb.Decl{}
+ if err := convertProto(d, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsDeclaration(canonical)
+}
+
+// ProtoAsDeclaration converts a canonical celpb.Decl value describing a variable or function into an EnvOption.
+func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) {
+ switch d.GetDeclKind().(type) {
+ case *celpb.Decl_Function:
+ overloads := d.GetFunction().GetOverloads()
+ opts := make([]FunctionOpt, len(overloads))
+ for i, o := range overloads {
+ args := make([]*Type, len(o.GetParams()))
+ for j, p := range o.GetParams() {
+ a, err := types.ProtoAsType(p)
+ if err != nil {
+ return nil, err
+ }
+ args[j] = a
+ }
+ res, err := types.ProtoAsType(o.GetResultType())
+ if err != nil {
+ return nil, err
+ }
+ if o.IsInstanceFunction {
+ opts[i] = decls.MemberOverload(o.GetOverloadId(), args, res)
+ } else {
+ opts[i] = decls.Overload(o.GetOverloadId(), args, res)
+ }
+ }
+ return Function(d.GetName(), opts...), nil
+ case *celpb.Decl_Ident:
+ t, err := types.ProtoAsType(d.GetIdent().GetType())
+ if err != nil {
+ return nil, err
+ }
+ if d.GetIdent().GetValue() == nil {
+ return Variable(d.GetName(), t), nil
+ }
+ val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return Constant(d.GetName(), t, val), nil
+ default:
+ return nil, fmt.Errorf("unsupported decl: %v", d)
+ }
+}
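+
+// A minimal sketch of converting a protobuf declaration into an EnvOption
+// (the variable name "request_id" is an illustrative assumption; chkdecls
+// aliases github.com/google/cel-go/checker/decls):
+//
+//	d := chkdecls.NewVar("request_id", chkdecls.String)
+//	opt, err := cel.ExprDeclToDeclaration(d)
+//	// opt may then be supplied to cel.NewEnv alongside other options.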
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/env.go b/hack/tools/vendor/github.com/google/cel-go/cel/env.go
new file mode 100644
index 0000000000..ab736b7769
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/env.go
@@ -0,0 +1,894 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/google/cel-go/checker"
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common"
+ celast "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface representing a user-provided expression.
+type Source = common.Source
+
+// Ast representing the checked or unchecked expression, its source, and related metadata such as
+// source position information.
+type Ast struct {
+ source Source
+ impl *celast.AST
+}
+
+// NativeRep converts the AST to a Go-native representation.
+func (ast *Ast) NativeRep() *celast.AST {
+ if ast == nil {
+ return nil
+ }
+ return ast.impl
+}
+
+// Expr returns the proto serializable instance of the parsed/checked expression.
+//
+// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr()
+// on the result instead.
+func (ast *Ast) Expr() *exprpb.Expr {
+ if ast == nil {
+ return nil
+ }
+ pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr())
+ return pbExpr
+}
+
+// IsChecked returns whether the Ast value has been successfully type-checked.
+func (ast *Ast) IsChecked() bool {
+ return ast.NativeRep().IsChecked()
+}
+
+// SourceInfo returns character offset and newline position information about expression elements.
+func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
+ if ast == nil {
+ return nil
+ }
+ pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo())
+ return pbInfo
+}
+
+// ResultType returns the output type of the expression if the Ast has been type-checked, else
+// returns chkdecls.Dyn as the parse step cannot infer the type.
+//
+// Deprecated: use OutputType
+func (ast *Ast) ResultType() *exprpb.Type {
+ out := ast.OutputType()
+ t, err := TypeToExprType(out)
+ if err != nil {
+ return chkdecls.Dyn
+ }
+ return t
+}
+
+// OutputType returns the output type of the expression if the Ast has been type-checked, else
+// returns cel.DynType as the parse step cannot infer types.
+func (ast *Ast) OutputType() *Type {
+ if ast == nil {
+ return types.ErrorType
+ }
+ return ast.NativeRep().GetType(ast.NativeRep().Expr().ID())
+}
+
+// Source returns a view of the input used to create the Ast. This source may be complete or
+// constructed from the SourceInfo.
+func (ast *Ast) Source() Source {
+ if ast == nil {
+ return nil
+ }
+ return ast.source
+}
+
+// FormatType converts a type message into a string representation.
+//
+// Deprecated: prefer FormatCELType
+func FormatType(t *exprpb.Type) string {
+ return checker.FormatCheckedType(t)
+}
+
+// FormatCELType formats a cel.Type value to a string representation.
+//
+// The type formatting is identical to FormatType.
+func FormatCELType(t *Type) string {
+ return checker.FormatCELType(t)
+}
+
+// Env encapsulates the context necessary to perform parsing, type checking, or generation of
+// evaluable programs for different expressions.
+type Env struct {
+ Container *containers.Container
+ variables []*decls.VariableDecl
+ functions map[string]*decls.FunctionDecl
+ macros []parser.Macro
+ adapter types.Adapter
+ provider types.Provider
+ features map[int]bool
+ appliedFeatures map[int]bool
+ libraries map[string]bool
+ validators []ASTValidator
+ costOptions []checker.CostOption
+
+ // Internal parser representation
+ prsr *parser.Parser
+ prsrOpts []parser.Option
+
+ // Internal checker representation
+ chkMutex sync.Mutex
+ chk *checker.Env
+ chkErr error
+ chkOnce sync.Once
+ chkOpts []checker.Option
+
+ // Program options tied to the environment
+ progOpts []ProgramOption
+}
+
+// NewEnv creates a program environment configured with the standard library of CEL functions and
+// macros. The Env value returned can parse and check any CEL program which builds upon the core
+// features documented in the CEL specification.
+//
+// See the EnvOption helper functions for the options that can be used to configure the
+// environment.
+func NewEnv(opts ...EnvOption) (*Env, error) {
+ // Extend the statically configured standard environment, disabling eager validation to ensure
+ // the cost of setup for the environment is still just as cheap as it is in v0.11.x and earlier
+ // releases. The user provided options can easily re-enable the eager validation as they are
+ // processed after this default option.
+ stdOpts := append([]EnvOption{EagerlyValidateDeclarations(false)}, opts...)
+ env, err := getStdEnv()
+ if err != nil {
+ return nil, err
+ }
+ return env.Extend(stdOpts...)
+}
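+
+// A minimal end-to-end sketch (the expression and variable name are
+// illustrative assumptions):
+//
+//	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
+//	ast, iss := env.Compile(`"hello " + name`)
+//	if iss.Err() != nil { /* handle compilation issues */ }
+//	prg, err := env.Program(ast)
+//	out, _, err := prg.Eval(map[string]any{"name": "world"})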
+
+// NewCustomEnv creates a custom program environment which is not automatically configured with the
+// standard library of functions and macros documented in the CEL spec.
+//
+// The purpose for using a custom environment might be for subsetting the standard library produced
+// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to
+// limit the compute and memory impact of a CEL program by controlling the functions and macros
+// that may appear in a given expression.
+//
+// See the EnvOption helper functions for the options that can be used to configure the
+// environment.
+func NewCustomEnv(opts ...EnvOption) (*Env, error) {
+ registry, err := types.NewRegistry()
+ if err != nil {
+ return nil, err
+ }
+ return (&Env{
+ variables: []*decls.VariableDecl{},
+ functions: map[string]*decls.FunctionDecl{},
+ macros: []parser.Macro{},
+ Container: containers.DefaultContainer,
+ adapter: registry,
+ provider: registry,
+ features: map[int]bool{},
+ appliedFeatures: map[int]bool{},
+ libraries: map[string]bool{},
+ validators: []ASTValidator{},
+ progOpts: []ProgramOption{},
+ costOptions: []checker.CostOption{},
+ }).configure(opts)
+}
+
+// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
+// If any `ASTValidators` are configured on the environment, they will be applied after a valid
+// type-check result. If any issues are detected, the validators will provide them on the
+// output Issues object.
+//
+// Either checking or validation has failed if the returned Issues value and its Issues.Err()
+// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a
+// fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call; however,
+// the mere presence of an Ast does not imply that it is valid for use.
+func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
+ // Construct the internal checker env, erroring if there is an issue adding the declarations.
+ chk, err := e.initChecker()
+ if err != nil {
+ errs := common.NewErrors(ast.Source())
+ errs.ReportError(common.NoLocation, err.Error())
+ return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+ }
+
+ checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk)
+ if len(errs.GetErrors()) > 0 {
+ return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+ }
+ // Manually create the Ast to ensure that the Ast source information (which may be more
+ // detailed than the information provided by Check) is returned to the caller.
+ ast = &Ast{
+ source: ast.Source(),
+ impl: checked}
+
+ // Avoid creating a validator config if it's not needed.
+ if len(e.validators) == 0 {
+ return ast, nil
+ }
+
+ // Generate a validator configuration from the set of configured validators.
+ vConfig := newValidatorConfig()
+ for _, v := range e.validators {
+ if cv, ok := v.(ASTValidatorConfigurer); ok {
+ cv.Configure(vConfig)
+ }
+ }
+ // Apply additional validators on the type-checked result.
+ iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+ for _, v := range e.validators {
+ v.Validate(e, vConfig, checked, iss)
+ }
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ return ast, nil
+}
+
+// Compile combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing the Compile step will not continue with the Check
+// phase. If non-error issues are encountered during Parse, they may be combined with any issues
+// discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) Compile(txt string) (*Ast, *Issues) {
+ return e.CompileSource(common.NewTextSource(txt))
+}
+
+// CompileSource combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing the CompileSource step will not continue with the
+// Check phase. If non-error issues are encountered during Parse, they may be combined with any
+// issues discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
+ ast, iss := e.ParseSource(src)
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ checked, iss2 := e.Check(ast)
+ if iss2.Err() != nil {
+ return nil, iss2
+ }
+ return checked, iss2
+}
+
+// Extend the current environment with additional options to produce a new Env.
+//
+// Note, the extended Env value should not share memory with the original. It is possible, however,
+// that CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
+// To ensure separation of state between extended environments either make sure the TypeAdapter and
+// TypeProvider are immutable, or that their underlying implementations are based on the
+// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
+func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
+ chk, chkErr := e.getCheckerOrError()
+ if chkErr != nil {
+ return nil, chkErr
+ }
+
+ prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
+ copy(prsrOptsCopy, e.prsrOpts)
+
+ // The type-checker is configured with Declarations. The declarations may either be provided
+ // as options which have not yet been validated, or may come from a previous checker instance
+ // whose types have already been validated.
+ chkOptsCopy := make([]checker.Option, len(e.chkOpts))
+ copy(chkOptsCopy, e.chkOpts)
+
+ // Copy the declarations if needed.
+ if chk != nil {
+ // If the type-checker has already been instantiated, then the e.declarations have been
+ // validated within the chk instance.
+ chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
+ }
+ varsCopy := make([]*decls.VariableDecl, len(e.variables))
+ copy(varsCopy, e.variables)
+
+ // Copy macros and program options
+ macsCopy := make([]parser.Macro, len(e.macros))
+ progOptsCopy := make([]ProgramOption, len(e.progOpts))
+ copy(macsCopy, e.macros)
+ copy(progOptsCopy, e.progOpts)
+
+ // Copy the adapter / provider if they appear to be mutable.
+ adapter := e.adapter
+ provider := e.provider
+ adapterReg, isAdapterReg := e.adapter.(*types.Registry)
+ providerReg, isProviderReg := e.provider.(*types.Registry)
+ // In most cases the provider and adapter will be a ref.TypeRegistry;
+ // however, in the rare cases where they are not, they are assumed to
+ // be immutable. Since it is possible to set the TypeProvider separately
+ // from the TypeAdapter, the possible configurations which could use a
+ // TypeRegistry as the base implementation are captured below.
+ if isAdapterReg && isProviderReg {
+ reg := providerReg.Copy()
+ provider = reg
+ // If the adapter and provider are the same object, set the adapter
+ // to the same ref.TypeRegistry as the provider.
+ if adapterReg == providerReg {
+ adapter = reg
+ } else {
+ // Otherwise, make a copy of the adapter.
+ adapter = adapterReg.Copy()
+ }
+ } else if isProviderReg {
+ provider = providerReg.Copy()
+ } else if isAdapterReg {
+ adapter = adapterReg.Copy()
+ }
+
+ featuresCopy := make(map[int]bool, len(e.features))
+ for k, v := range e.features {
+ featuresCopy[k] = v
+ }
+ appliedFeaturesCopy := make(map[int]bool, len(e.appliedFeatures))
+ for k, v := range e.appliedFeatures {
+ appliedFeaturesCopy[k] = v
+ }
+ funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
+ for k, v := range e.functions {
+ funcsCopy[k] = v
+ }
+ libsCopy := make(map[string]bool, len(e.libraries))
+ for k, v := range e.libraries {
+ libsCopy[k] = v
+ }
+ validatorsCopy := make([]ASTValidator, len(e.validators))
+ copy(validatorsCopy, e.validators)
+ costOptsCopy := make([]checker.CostOption, len(e.costOptions))
+ copy(costOptsCopy, e.costOptions)
+
+ ext := &Env{
+ Container: e.Container,
+ variables: varsCopy,
+ functions: funcsCopy,
+ macros: macsCopy,
+ progOpts: progOptsCopy,
+ adapter: adapter,
+ features: featuresCopy,
+ appliedFeatures: appliedFeaturesCopy,
+ libraries: libsCopy,
+ validators: validatorsCopy,
+ provider: provider,
+ chkOpts: chkOptsCopy,
+ prsrOpts: prsrOptsCopy,
+ costOptions: costOptsCopy,
+ }
+ return ext.configure(opts)
+}
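+
+// A minimal sketch of extending an environment (the variable names are
+// illustrative assumptions):
+//
+//	base, err := cel.NewEnv(cel.Variable("a", cel.IntType))
+//	ext, err := base.Extend(cel.Variable("b", cel.IntType))
+//	// ext accepts expressions over both 'a' and 'b'; base is unchanged.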
+
+// HasFeature checks whether the environment enables the given feature
+// flag, as enumerated in options.go.
+func (e *Env) HasFeature(flag int) bool {
+ enabled, has := e.features[flag]
+ return has && enabled
+}
+
+// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
+func (e *Env) HasLibrary(libName string) bool {
+ configured, exists := e.libraries[libName]
+ return exists && configured
+}
+
+// Libraries returns a list of SingletonLibrary that have been configured in the environment.
+func (e *Env) Libraries() []string {
+ libraries := make([]string, 0, len(e.libraries))
+ for libName := range e.libraries {
+ libraries = append(libraries, libName)
+ }
+ return libraries
+}
+
+// HasFunction returns whether a specific function has been configured in the environment.
+func (e *Env) HasFunction(functionName string) bool {
+ _, ok := e.functions[functionName]
+ return ok
+}
+
+// Functions returns the map of Functions, keyed by function name, that have been configured in the environment.
+func (e *Env) Functions() map[string]*decls.FunctionDecl {
+ return e.functions
+}
+
+// HasValidator returns whether a specific ASTValidator has been configured in the environment.
+func (e *Env) HasValidator(name string) bool {
+ for _, v := range e.validators {
+ if v.Name() == name {
+ return true
+ }
+ }
+ return false
+}
+
+// Parse parses the input expression value `txt` to an Ast and/or a set of Issues.
+//
+// This form of Parse creates a Source value for the input `txt` and forwards to the
+// ParseSource method.
+func (e *Env) Parse(txt string) (*Ast, *Issues) {
+ src := common.NewTextSource(txt)
+ return e.ParseSource(src)
+}
+
+// ParseSource parses the input source to an Ast and/or set of Issues.
+//
+// Parsing has failed if the returned Issues value and its Issues.Err() value are non-nil.
+// Issues should be inspected if they are non-nil, but may not represent a fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call; however,
+// the mere presence of an Ast does not imply that it is valid for use.
+func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
+ parsed, errs := e.prsr.Parse(src)
+ if len(errs.GetErrors()) > 0 {
+ return nil, &Issues{errs: errs}
+ }
+ return &Ast{source: src, impl: parsed}, nil
+}
+
+// Program generates an evaluable instance of the Ast within the environment (Env).
+func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
+ return e.PlanProgram(ast.NativeRep(), opts...)
+}
+
+// PlanProgram generates an evaluable instance of the AST in the go-native representation within
+// the environment (Env).
+func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) {
+ optSet := e.progOpts
+ if len(opts) != 0 {
+ mergedOpts := []ProgramOption{}
+ mergedOpts = append(mergedOpts, e.progOpts...)
+ mergedOpts = append(mergedOpts, opts...)
+ optSet = mergedOpts
+ }
+ return newProgram(e, a, optSet)
+}
+
+// CELTypeAdapter returns the `types.Adapter` configured for the environment.
+func (e *Env) CELTypeAdapter() types.Adapter {
+ return e.adapter
+}
+
+// CELTypeProvider returns the `types.Provider` configured for the environment.
+func (e *Env) CELTypeProvider() types.Provider {
+ return e.provider
+}
+
+// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
+//
+// Deprecated: use CELTypeAdapter()
+func (e *Env) TypeAdapter() ref.TypeAdapter {
+ return e.adapter
+}
+
+// TypeProvider returns the `ref.TypeProvider` configured for the environment.
+//
+// Deprecated: use CELTypeProvider()
+func (e *Env) TypeProvider() ref.TypeProvider {
+ if legacyProvider, ok := e.provider.(ref.TypeProvider); ok {
+ return legacyProvider
+ }
+ return &interopLegacyTypeProvider{Provider: e.provider}
+}
+
+// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
+// Env as unknown AttributePattern values.
+//
+// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) UnknownVars() interpreter.PartialActivation {
+ act := interpreter.EmptyActivation()
+ part, _ := PartialVars(act, e.computeUnknownVars(act)...)
+ return part
+}
+
+// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
+// set, but which have been configured in the environment, are marked as unknown.
+//
+// The `vars` value may either be an interpreter.Activation or any valid input to the
+// interpreter.NewActivation call.
+//
+// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
+// variables. For more advanced use cases of partial state where portions of an object graph, rather
+// than top-level variables, are missing, the PartialVars() method may be a more suitable choice.
+//
+// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
+ act, err := interpreter.NewActivation(vars)
+ if err != nil {
+ return nil, err
+ }
+ return PartialVars(act, e.computeUnknownVars(act)...)
+}
+
+// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
+// attribute references which are unknown.
+//
+// Residual expressions are beneficial in a few scenarios:
+//
+// - Optimizing constant expression evaluations away.
+// - Indexing and pruning expressions based on known input arguments.
+// - Surfacing additional requirements that are needed in order to complete an evaluation.
+// - Sharing the evaluation of an expression across multiple machines/nodes.
+//
+// For example, if an expression targets a 'resource' and 'request' attribute and the possible
+// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
+// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
+// of the expression that reference the 'request'.
+//
+// Note, the expression ids within the residual AST generated through this method have no
+// correlation to the expression ids of the original AST.
+//
+// See the PartialVars helper for how to construct a PartialActivation.
+//
+// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
+// Ast format and then Program again.
+func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
+ pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State())
+ newAST := &Ast{source: a.Source(), impl: pruned}
+ expr, err := AstToString(newAST)
+ if err != nil {
+ return nil, err
+ }
+ parsed, iss := e.Parse(expr)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ if !a.IsChecked() {
+ return parsed, nil
+ }
+ checked, iss := e.Check(parsed)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ return checked, nil
+}
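+
+// A minimal partial-evaluation sketch (the variable names are illustrative
+// assumptions; OptTrackState and OptPartialEval enable residual generation):
+//
+//	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
+//	vars, err := env.PartialVars(map[string]any{"resource": resource})
+//	out, details, err := prg.Eval(vars)
+//	if types.IsUnknown(out) {
+//	    residual, err := env.ResidualAst(ast, details)
+//	}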
+
+// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
+// extension functions provided by the estimator.
+func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
+ extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
+ extendedOpts = append(extendedOpts, opts...)
+ extendedOpts = append(extendedOpts, e.costOptions...)
+ return checker.Cost(ast.impl, estimator, extendedOpts...)
+}
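+
+// A minimal sketch (the nopEstimator type is an illustrative assumption; nil
+// estimates defer to the checker's built-in size and cost heuristics):
+//
+//	type nopEstimator struct{}
+//
+//	func (nopEstimator) EstimateSize(e checker.AstNode) *checker.SizeEstimate { return nil }
+//	func (nopEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
+//	    return nil
+//	}
+//
+//	cost, err := env.EstimateCost(ast, nopEstimator{})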
+
+// configure applies a series of EnvOptions to the current environment.
+func (e *Env) configure(opts []EnvOption) (*Env, error) {
+ // Customize the environment using the provided EnvOption values. If an error is
+ // generated at any step, this will be returned as a nil Env with a non-nil error.
+ var err error
+ for _, opt := range opts {
+ e, err = opt(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If the default UTC timezone fix has been enabled, make sure the library is configured
+ e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
+ if err != nil {
+ return nil, err
+ }
+
+ // Configure the parser.
+ prsrOpts := []parser.Option{}
+ prsrOpts = append(prsrOpts, e.prsrOpts...)
+ prsrOpts = append(prsrOpts, parser.Macros(e.macros...))
+
+ if e.HasFeature(featureEnableMacroCallTracking) {
+ prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
+ }
+ if e.HasFeature(featureVariadicLogicalASTs) {
+ prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
+ }
+ e.prsr, err = parser.NewParser(prsrOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure that the checker init happens eagerly rather than lazily.
+ if e.HasFeature(featureEagerlyValidateDeclarations) {
+ _, err := e.initChecker()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return e, nil
+}
+
+func (e *Env) initChecker() (*checker.Env, error) {
+ e.chkOnce.Do(func() {
+ chkOpts := []checker.Option{}
+ chkOpts = append(chkOpts, e.chkOpts...)
+ chkOpts = append(chkOpts,
+ checker.CrossTypeNumericComparisons(
+ e.HasFeature(featureCrossTypeNumericComparisons)))
+
+ ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ // Add the statically configured declarations.
+ err = ce.AddIdents(e.variables...)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ // Add the function declarations which are derived from the FunctionDecl instances.
+ for _, fn := range e.functions {
+ if fn.IsDeclarationDisabled() {
+ continue
+ }
+ err = ce.AddFunctions(fn)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ }
+ // Add function declarations here separately.
+ e.setCheckerOrError(ce, nil)
+ })
+ return e.getCheckerOrError()
+}
+
+// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) {
+ e.chkMutex.Lock()
+ e.chk = chk
+ e.chkErr = chkErr
+ e.chkMutex.Unlock()
+}
+
+// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) getCheckerOrError() (*checker.Env, error) {
+ e.chkMutex.Lock()
+ defer e.chkMutex.Unlock()
+ return e.chk, e.chkErr
+}
+
+// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
+// the feature if it has not already been enabled.
+func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
+ if !e.HasFeature(feature) {
+ return e, nil
+ }
+ _, applied := e.appliedFeatures[feature]
+ if applied {
+ return e, nil
+ }
+ e, err := option(e)
+ if err != nil {
+ return nil, err
+ }
+ // record that the feature has been applied since it will generate declarations
+ // and functions which will be propagated on Extend() calls and which should only
+ // be registered once.
+ e.appliedFeatures[feature] = true
+ return e, nil
+}
+
+// computeUnknownVars determines a set of missing variables based on the input activation and the
+// environment's configured declaration set.
+func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
+ var unknownPatterns []*interpreter.AttributePattern
+ for _, v := range e.variables {
+ varName := v.Name()
+ if _, found := vars.ResolveName(varName); found {
+ continue
+ }
+ unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName))
+ }
+ return unknownPatterns
+}
+
+// Error type which references an expression id, a location within source, and a message.
+type Error = common.Error
+
+// Issues defines methods for inspecting the error details of parse and check calls.
+//
+// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
+type Issues struct {
+ errs *common.Errors
+ info *celast.SourceInfo
+}
+
+// NewIssues returns an Issues struct from a common.Errors object.
+func NewIssues(errs *common.Errors) *Issues {
+ return NewIssuesWithSourceInfo(errs, nil)
+}
+
+// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
+// which can be used with the `ReportErrorAtID` method for additional error reports within the context
+// information that's inferred from an expression id.
+func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues {
+ return &Issues{
+ errs: errs,
+ info: info,
+ }
+}
+
+// Err returns an error value if the issues list contains one or more errors.
+func (i *Issues) Err() error {
+ if i == nil {
+ return nil
+ }
+ if len(i.Errors()) > 0 {
+ return errors.New(i.String())
+ }
+ return nil
+}
+
+// Errors returns the collection of errors encountered in more granular detail.
+func (i *Issues) Errors() []*Error {
+ if i == nil {
+ return []*Error{}
+ }
+ return i.errs.GetErrors()
+}
+
+// Append collects the issues from another Issues struct into a new Issues object.
+func (i *Issues) Append(other *Issues) *Issues {
+ if i == nil {
+ return other
+ }
+ if other == nil || i == other {
+ return i
+ }
+ return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info)
+}
+
+// String converts the issues to a suitable display string.
+func (i *Issues) String() string {
+ if i == nil {
+ return ""
+ }
+ return i.errs.ToDisplayString()
+}
+
+// ReportErrorAtID reports an error message with an optional set of formatting arguments.
+//
+// The source metadata for the expression at `id`, if present, is attached to the error report.
+// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
+func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
+ i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...)
+}
+
+// getStdEnv lazy initializes the CEL standard environment.
+func getStdEnv() (*Env, error) {
+ stdEnvInit.Do(func() {
+ stdEnv, stdEnvErr = NewCustomEnv(StdLib(), EagerlyValidateDeclarations(true))
+ })
+ return stdEnv, stdEnvErr
+}
+
+// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider.
+type interopCELTypeProvider struct {
+ ref.TypeProvider
+}
+
+// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+ if et, found := p.FindType(typeName); found {
+ t, err := types.ExprTypeToType(et)
+ if err != nil {
+ return nil, false
+ }
+ return t, true
+ }
+ return nil, false
+}
+
+// FindStructFieldNames returns an empty set of fields for the interop provider.
+//
+// To inspect the field names, migrate to a `types.Provider` implementation.
+func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+ return []string{}, false
+}
+
+// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
+// name, if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
+ if ft, found := p.FindFieldType(structType, fieldName); found {
+ t, err := types.ExprTypeToType(ft.Type)
+ if err != nil {
+ return nil, false
+ }
+ return &types.FieldType{
+ Type: t,
+ IsSet: ft.IsSet,
+ GetFrom: ft.GetFrom,
+ }, true
+ }
+ return nil, false
+}
+
+// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider.
+type interopLegacyTypeProvider struct {
+ types.Provider
+}
+
+// FindType returns the protobuf Type representation for the input type name if one exists.
+//
+// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type
+// value to a protobuf Type representation.
+//
+// Failure to convert the type will result in the type not being found.
+func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
+ if t, found := p.FindStructType(typeName); found {
+ et, err := types.TypeToExprType(t)
+ if err != nil {
+ return nil, false
+ }
+ return et, true
+ }
+ return nil, false
+}
+
+// FindFieldType returns the protobuf-based FieldType representation for the input type name and field,
+// if one exists.
+//
+// This call proxies to the types.Provider FindStructFieldType method and converts the types.FieldType
+// value to a protobuf-based ref.FieldType representation if found.
+//
+// Failure to convert the FieldType will result in the field not being found.
+func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+ if cft, found := p.FindStructFieldType(structType, fieldName); found {
+ et, err := types.TypeToExprType(cft.Type)
+ if err != nil {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: et,
+ IsSet: cft.IsSet,
+ GetFrom: cft.GetFrom,
+ }, true
+ }
+ return nil, false
+}
+
+var (
+ stdEnvInit sync.Once
+ stdEnv *Env
+ stdEnvErr error
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/folding.go b/hack/tools/vendor/github.com/google/cel-go/cel/folding.go
new file mode 100644
index 0000000000..d7060896d3
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/folding.go
@@ -0,0 +1,559 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// ConstantFoldingOption defines a functional option for configuring constant folding.
+type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error)
+
+// MaxConstantFoldIterations limits the number of times literals may be folded during optimization.
+//
+// Defaults to 100 if not set.
+func MaxConstantFoldIterations(limit int) ConstantFoldingOption {
+ return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
+ opt.maxFoldIterations = limit
+ return opt, nil
+ }
+}
+
+// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar and aggregate
+// literal values within function calls and select statements with their evaluated result.
+func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) {
+ folder := &constantFoldingOptimizer{
+ maxFoldIterations: defaultMaxConstantFoldIterations,
+ }
+ var err error
+ for _, o := range opts {
+ folder, err = o(folder)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return folder, nil
+}
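+
+// A minimal sketch of applying the folding optimizer through the static
+// optimizer defined in optimizer.go:
+//
+//	folder, err := cel.NewConstantFoldingOptimizer()
+//	opt := cel.NewStaticOptimizer(folder)
+//	optimized, iss := opt.Optimize(env, ast)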
+
+type constantFoldingOptimizer struct {
+ maxFoldIterations int
+}
+
+// Optimize queries the expression graph for scalar and aggregate literal expressions within call and
+// select statements and then evaluates them and replaces the call site with the literal result.
+//
+// Note: only values which can be represented as literals in CEL syntax are supported.
+func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+ root := ast.NavigateAST(a)
+
+ // Walk the list of foldable expressions and continue to fold until there are no more folds left.
+ // All of the fold candidates returned by the constantExprMatcher should succeed unless there's
+ // a logic bug with the selection of expressions.
+ foldableExprs := ast.MatchDescendants(root, constantExprMatcher)
+ foldCount := 0
+ for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations {
+ for _, fold := range foldableExprs {
+ // If the expression could be folded because it's a non-strict call, and the
+ // branches are pruned, continue to the next fold.
+ if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) {
+ continue
+ }
+ // Otherwise, assume all context is needed to evaluate the expression.
+ err := tryFold(ctx, a, fold)
+ if err != nil {
+ ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error())
+ return a
+ }
+ }
+ foldCount++
+ foldableExprs = ast.MatchDescendants(root, constantExprMatcher)
+ }
+ // Once all of the constants have been folded, try to run through the remaining comprehensions
+ // one last time. In this case, there's no guarantee they'll run, so we only update the
+ // target comprehension node with the literal value if the evaluation succeeds.
+ for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) {
+ tryFold(ctx, a, compre)
+ }
+
+ // If the output is a list, map, or struct which contains optional entries, then prune it
+ // to make sure that the optionals, if resolved, do not surface in the output literal.
+ pruneOptionalElements(ctx, root)
+
+ // Ensure that all intermediate values in the folded expression can be represented as valid
+ // CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendants`
+ // to avoid extra allocations during this final pass through the AST.
+ ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.Kind() != ast.LiteralKind {
+ return
+ }
+ val := e.AsLiteral()
+ adapted, err := adaptLiteral(ctx, val)
+ if err != nil {
+ ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error())
+ return
+ }
+ ctx.UpdateExpr(e, adapted)
+ }))
+
+ return a
+}
+
+// tryFold attempts to evaluate a sub-expression to a literal.
+//
+// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise
+// the method will return an error.
+func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
+ // Assume all context is needed to evaluate the expression.
+ subAST := &Ast{
+ impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()),
+ }
+ prg, err := ctx.Program(subAST)
+ if err != nil {
+ return err
+ }
+ out, _, err := prg.Eval(NoVars())
+ if err != nil {
+ return err
+ }
+ // Update the fold expression to be a literal.
+ ctx.UpdateExpr(expr, ctx.NewLiteral(out))
+ return nil
+}
+
+// maybePruneBranches inspects the non-strict call expression to determine whether
+// a branch can be removed. Evaluation will naturally prune logical and / or calls,
+// but conditionals will not be pruned cleanly, so this is one small area where the
+// constant folding step reimplements a portion of the evaluator.
+func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool {
+ call := expr.AsCall()
+ args := call.Args()
+ switch call.FunctionName() {
+ case operators.LogicalAnd, operators.LogicalOr:
+ return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr)
+ case operators.Conditional:
+ cond := args[0]
+ truthy := args[1]
+ falsy := args[2]
+ if cond.Kind() != ast.LiteralKind {
+ return false
+ }
+ if cond.AsLiteral() == types.True {
+ ctx.UpdateExpr(expr, truthy)
+ } else {
+ ctx.UpdateExpr(expr, falsy)
+ }
+ return true
+ case operators.In:
+ haystack := args[1]
+ if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
+ ctx.UpdateExpr(expr, ctx.NewLiteral(types.False))
+ return true
+ }
+ needle := args[0]
+ if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
+ needleValue := needle.AsLiteral()
+ list := haystack.AsList()
+ for _, e := range list.Elements() {
+ if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
+ ctx.UpdateExpr(expr, ctx.NewLiteral(types.True))
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool {
+ shortcircuit := types.False
+ skip := types.True
+ if function == operators.LogicalOr {
+ shortcircuit = types.True
+ skip = types.False
+ }
+ newArgs := []ast.Expr{}
+ for _, arg := range args {
+ if arg.Kind() != ast.LiteralKind {
+ newArgs = append(newArgs, arg)
+ continue
+ }
+ if arg.AsLiteral() == skip {
+ continue
+ }
+ if arg.AsLiteral() == shortcircuit {
+ ctx.UpdateExpr(expr, arg)
+ return true
+ }
+ }
+ if len(newArgs) == 0 {
+ newArgs = append(newArgs, args[0])
+ ctx.UpdateExpr(expr, newArgs[0])
+ return true
+ }
+ if len(newArgs) == 1 {
+ ctx.UpdateExpr(expr, newArgs[0])
+ return true
+ }
+ ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...))
+ return true
+}
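+
+// For example, given the pruning logic above: `true && x` folds to `x`,
+// `x && false` folds to `false`, and `a || false || b` folds to `a || b`.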
+
+// pruneOptionalElements works from the bottom up to resolve optional elements within
+// aggregate literals.
+//
+// Note, many aggregate literals will be resolved as arguments to functions or select
+// statements, so this method exists to handle the case where the literal could not be
+// fully resolved or exists outside of a call, select, or comprehension context.
+func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) {
+ aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher)
+ for _, lit := range aggregateLiterals {
+ switch lit.Kind() {
+ case ast.ListKind:
+ pruneOptionalListElements(ctx, lit)
+ case ast.MapKind:
+ pruneOptionalMapEntries(ctx, lit)
+ case ast.StructKind:
+ pruneOptionalStructFields(ctx, lit)
+ }
+ }
+}
+
+func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) {
+ l := e.AsList()
+ elems := l.Elements()
+ optIndices := l.OptionalIndices()
+ if len(optIndices) == 0 {
+ return
+ }
+ updatedElems := []ast.Expr{}
+ updatedIndices := []int32{}
+ newOptIndex := -1
+ for _, e := range elems {
+ newOptIndex++
+ if !l.IsOptional(int32(newOptIndex)) {
+ updatedElems = append(updatedElems, e)
+ continue
+ }
+ if e.Kind() != ast.LiteralKind {
+ updatedElems = append(updatedElems, e)
+ updatedIndices = append(updatedIndices, int32(newOptIndex))
+ continue
+ }
+ optElemVal, ok := e.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedElems = append(updatedElems, e)
+ updatedIndices = append(updatedIndices, int32(newOptIndex))
+ continue
+ }
+ if !optElemVal.HasValue() {
+ newOptIndex-- // Skipping causes the list to get smaller.
+ continue
+ }
+ ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedElems = append(updatedElems, e)
+ }
+ ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices))
+}
+
+func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) {
+ m := e.AsMap()
+ entries := m.Entries()
+ updatedEntries := []ast.EntryExpr{}
+ modified := false
+ for _, e := range entries {
+ entry := e.AsMapEntry()
+ key := entry.Key()
+ val := entry.Value()
+ // If the entry is not optional, or the value-side of the optional hasn't
+ // been resolved to a literal, then preserve the entry as-is.
+ if !entry.IsOptional() || val.Kind() != ast.LiteralKind {
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ optElemVal, ok := val.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ // When the key is not a literal, but the value is, then it needs to be
+ // restored to an optional value.
+ if key.Kind() != ast.LiteralKind {
+ undoOptVal, err := adaptLiteral(ctx, optElemVal)
+ if err != nil {
+ ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err)
+ }
+ ctx.UpdateExpr(val, undoOptVal)
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ modified = true
+ if !optElemVal.HasValue() {
+ continue
+ }
+ ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedEntry := ctx.NewMapEntry(key, val, false)
+ updatedEntries = append(updatedEntries, updatedEntry)
+ }
+ if modified {
+ ctx.UpdateExpr(e, ctx.NewMap(updatedEntries))
+ }
+}
+
+func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) {
+ s := e.AsStruct()
+ fields := s.Fields()
+ updatedFields := []ast.EntryExpr{}
+ modified := false
+ for _, f := range fields {
+ field := f.AsStructField()
+ val := field.Value()
+ if !field.IsOptional() || val.Kind() != ast.LiteralKind {
+ updatedFields = append(updatedFields, f)
+ continue
+ }
+ optElemVal, ok := val.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedFields = append(updatedFields, f)
+ continue
+ }
+ modified = true
+ if !optElemVal.HasValue() {
+ continue
+ }
+ ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedField := ctx.NewStructField(field.Name(), val, false)
+ updatedFields = append(updatedFields, updatedField)
+ }
+ if modified {
+ ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields))
+ }
+}
+
+// adaptLiteral converts a runtime CEL value to its equivalent literal expression.
+//
+// For strongly typed values, the type-provider will be used to reconstruct the fields
+// which are present in the literal and their equivalent initialization values.
+func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) {
+ switch t := val.Type().(type) {
+ case *types.Type:
+ switch t {
+ case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+ types.NullType, types.StringType, types.UintType:
+ return ctx.NewLiteral(val), nil
+ case types.DurationType:
+ return ctx.NewCall(
+ overloads.TypeConvertDuration,
+ ctx.NewLiteral(val.ConvertToType(types.StringType)),
+ ), nil
+ case types.TimestampType:
+ return ctx.NewCall(
+ overloads.TypeConvertTimestamp,
+ ctx.NewLiteral(val.ConvertToType(types.StringType)),
+ ), nil
+ case types.OptionalType:
+ opt := val.(*types.Optional)
+ if !opt.HasValue() {
+ return ctx.NewCall("optional.none"), nil
+ }
+ target, err := adaptLiteral(ctx, opt.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return ctx.NewCall("optional.of", target), nil
+ case types.TypeType:
+ return ctx.NewIdent(val.(*types.Type).TypeName()), nil
+ case types.ListType:
+ l, ok := val.(traits.Lister)
+ if !ok {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ elems := make([]ast.Expr, l.Size().(types.Int))
+ idx := 0
+ it := l.Iterator()
+ for it.HasNext() == types.True {
+ elemVal := it.Next()
+ elemExpr, err := adaptLiteral(ctx, elemVal)
+ if err != nil {
+ return nil, err
+ }
+ elems[idx] = elemExpr
+ idx++
+ }
+ return ctx.NewList(elems, []int32{}), nil
+ case types.MapType:
+ m, ok := val.(traits.Mapper)
+ if !ok {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ entries := make([]ast.EntryExpr, m.Size().(types.Int))
+ idx := 0
+ it := m.Iterator()
+ for it.HasNext() == types.True {
+ keyVal := it.Next()
+ keyExpr, err := adaptLiteral(ctx, keyVal)
+ if err != nil {
+ return nil, err
+ }
+ valVal := m.Get(keyVal)
+ valExpr, err := adaptLiteral(ctx, valVal)
+ if err != nil {
+ return nil, err
+ }
+ entries[idx] = ctx.NewMapEntry(keyExpr, valExpr, false)
+ idx++
+ }
+ return ctx.NewMap(entries), nil
+ default:
+ provider := ctx.CELTypeProvider()
+ fields, found := provider.FindStructFieldNames(t.TypeName())
+ if !found {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ tester := val.(traits.FieldTester)
+ indexer := val.(traits.Indexer)
+ fieldInits := []ast.EntryExpr{}
+ for _, f := range fields {
+ field := types.String(f)
+ if tester.IsSet(field) != types.True {
+ continue
+ }
+ fieldVal := indexer.Get(field)
+ fieldExpr, err := adaptLiteral(ctx, fieldVal)
+ if err != nil {
+ return nil, err
+ }
+ fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false))
+ }
+ return ctx.NewStruct(t.TypeName(), fieldInits), nil
+ }
+ }
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+}
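+
+// For example, per the cases above, a folded duration value is re-emitted as a
+// call such as duration("60s"), a timestamp as timestamp("..."), and an
+// optional as optional.of(...) or optional.none().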
+
+// constantExprMatcher matches calls, select statements, and comprehensions whose arguments
+// are all constant scalar or aggregate literal values.
+//
+// Only comprehensions which are not nested are included as possible constant folds, and only
+// if all variables referenced in the comprehension stack are iteration or accumulation
+// variables.
+func constantExprMatcher(e ast.NavigableExpr) bool {
+ switch e.Kind() {
+ case ast.CallKind:
+ return constantCallMatcher(e)
+ case ast.SelectKind:
+ sel := e.AsSelect() // guaranteed to be a navigable value
+ return constantMatcher(sel.Operand().(ast.NavigableExpr))
+ case ast.ComprehensionKind:
+ if isNestedComprehension(e) {
+ return false
+ }
+ vars := map[string]bool{}
+ constantExprs := true
+ visitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if e.Kind() == ast.ComprehensionKind {
+ nested := e.AsComprehension()
+ vars[nested.AccuVar()] = true
+ vars[nested.IterVar()] = true
+ }
+ if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] {
+ constantExprs = false
+ }
+ })
+ ast.PreOrderVisit(e, visitor)
+ return constantExprs
+ default:
+ return false
+ }
+}
+
+// constantCallMatcher identifies strict and non-strict calls which can be folded.
+func constantCallMatcher(e ast.NavigableExpr) bool {
+ call := e.AsCall()
+ children := e.Children()
+ fnName := call.FunctionName()
+ if fnName == operators.LogicalAnd {
+ for _, child := range children {
+ if child.Kind() == ast.LiteralKind {
+ return true
+ }
+ }
+ }
+ if fnName == operators.LogicalOr {
+ for _, child := range children {
+ if child.Kind() == ast.LiteralKind {
+ return true
+ }
+ }
+ }
+ if fnName == operators.Conditional {
+ cond := children[0]
+ if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType {
+ return true
+ }
+ }
+ if fnName == operators.In {
+ haystack := children[1]
+ if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
+ return true
+ }
+ needle := children[0]
+ if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
+ needleValue := needle.AsLiteral()
+ list := haystack.AsList()
+ for _, e := range list.Elements() {
+ if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
+ return true
+ }
+ }
+ }
+ }
+ // convert all other calls with constant arguments
+ for _, child := range children {
+ if !constantMatcher(child) {
+ return false
+ }
+ }
+ return true
+}
+
+func isNestedComprehension(e ast.NavigableExpr) bool {
+ parent, found := e.Parent()
+ for found {
+ if parent.Kind() == ast.ComprehensionKind {
+ return true
+ }
+ parent, found = parent.Parent()
+ }
+ return false
+}
+
+func aggregateLiteralMatcher(e ast.NavigableExpr) bool {
+ return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind
+}
+
+var (
+ constantMatcher = ast.ConstantValueMatcher()
+)
+
+const (
+ defaultMaxConstantFoldIterations = 100
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/inlining.go b/hack/tools/vendor/github.com/google/cel-go/cel/inlining.go
new file mode 100644
index 0000000000..78d5bea65b
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/inlining.go
@@ -0,0 +1,228 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// InlineVariable holds a variable name to be matched and an AST representing
+// the expression graph which should be used to replace it.
+type InlineVariable struct {
+ name string
+ alias string
+ def *ast.AST
+}
+
+// Name returns the qualified variable or field selection to replace.
+func (v *InlineVariable) Name() string {
+ return v.name
+}
+
+// Alias returns the alias to use when performing cel.bind() calls during inlining.
+func (v *InlineVariable) Alias() string {
+ return v.alias
+}
+
+// Expr returns the inlined expression value.
+func (v *InlineVariable) Expr() ast.Expr {
+ return v.def.Expr()
+}
+
+// Type indicates the inlined expression type.
+func (v *InlineVariable) Type() *Type {
+ return v.def.GetType(v.def.Expr().ID())
+}
+
+// NewInlineVariable declares a variable name to be replaced by a checked expression.
+func NewInlineVariable(name string, definition *Ast) *InlineVariable {
+ return NewInlineVariableWithAlias(name, name, definition)
+}
+
+// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression.
+// If the variable occurs more than once, the provided alias will be used to replace the expressions
+// where the variable name occurs.
+func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
+ return &InlineVariable{name: name, alias: alias, def: definition.impl}
+}
+
+// NewInliningOptimizer creates an optimizer which replaces variables with expression definitions.
+//
+// If a variable occurs one time, the variable is replaced by the inline definition. If the
+// variable occurs more than once, the variable occurrences are replaced by a cel.bind() call.
+func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer {
+ return &inliningOptimizer{variables: inlineVars}
+}
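+
+// A minimal usage sketch (names illustrative; xDef is a checked *Ast defining
+// the value to substitute for the variable "x"):
+//
+//    inliner := NewInliningOptimizer(NewInlineVariable("x", xDef))
+//    optimizer := NewStaticOptimizer(inliner)
+//    optimizedAST, iss := optimizer.Optimize(env, checkedAST)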
+
+type inliningOptimizer struct {
+ variables []*InlineVariable
+}
+
+func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+ root := ast.NavigateAST(a)
+ for _, inlineVar := range opt.variables {
+ matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name()))
+ // Skip cases where the variable isn't in the expression graph
+ if len(matches) == 0 {
+ continue
+ }
+
+ // For a single match, do a direct replacement of the expression sub-graph.
+ if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) {
+ for _, match := range matches {
+ // Copy the inlined AST expr and source info.
+ copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+ opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type())
+ }
+ continue
+ }
+
+ // For multiple matches, find the least common ancestor (lca) and insert the
+ // variable as a cel.bind() macro.
+ var lca ast.NavigableExpr = root
+ lcaAncestorCount := 0
+ ancestors := map[int64]int{}
+ for _, match := range matches {
+ // Update the identifier matches with the provided alias.
+ parent, found := match, true
+ for found {
+ ancestorCount, hasAncestor := ancestors[parent.ID()]
+ if !hasAncestor {
+ ancestors[parent.ID()] = 1
+ parent, found = parent.Parent()
+ continue
+ }
+ if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) {
+ lca = parent
+ lcaAncestorCount = ancestorCount
+ }
+ ancestors[parent.ID()] = ancestorCount + 1
+ parent, found = parent.Parent()
+ }
+ aliasExpr := ctx.NewIdent(inlineVar.Alias())
+ opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type())
+ }
+
+ // Copy the inlined AST expr and source info.
+ copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+ // Update the least common ancestor by inserting a cel.bind() call to the alias.
+ inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca)
+ opt.inlineExpr(ctx, lca, inlined, inlineVar.Type())
+ ctx.SetMacroCall(lca.ID(), bindMacro)
+ }
+ return a
+}
+
+// inlineExpr replaces the current expression with the inlined one, unless the inlining occurs within
+// a presence test, e.g. has(a.b.c) where alpha is inlined for a.b.c, in which case an attempt is
+// made to determine whether the inlined value can be presence- or existence-tested.
+func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) {
+ switch prev.Kind() {
+ case ast.SelectKind:
+ sel := prev.AsSelect()
+ if !sel.IsTestOnly() {
+ ctx.UpdateExpr(prev, inlined)
+ return
+ }
+ opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType)
+ default:
+ ctx.UpdateExpr(prev, inlined)
+ }
+}
+
+// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to a type-safe
+// expression appropriate for the inlined type, if possible.
+//
+// If the rewrite is not possible an error is reported at the inline expression site.
+func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) {
+ // If the input inlined expression is not a select expression it won't work with the has()
+ // macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error.
+ if inlined.Kind() == ast.SelectKind {
+ presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined)
+ ctx.UpdateExpr(prev, presenceTest)
+ ctx.SetMacroCall(prev.ID(), hasMacro)
+ return
+ }
+
+ ctx.ClearMacroCall(prev.ID())
+ if inlinedType.IsAssignableType(NullType) {
+ ctx.UpdateExpr(prev,
+ ctx.NewCall(operators.NotEquals,
+ inlined,
+ ctx.NewLiteral(types.NullValue),
+ ))
+ return
+ }
+ if inlinedType.HasTrait(traits.SizerType) {
+ ctx.UpdateExpr(prev,
+ ctx.NewCall(operators.NotEquals,
+ ctx.NewMemberCall(overloads.Size, inlined),
+ ctx.NewLiteral(types.IntZero),
+ ))
+ return
+ }
+ ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType)
+}
+
+// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression
+// being replaced occurs within a presence test. Value types with a size() method or field selection
+// support can be bound.
+//
+// In future iterations, support may also be added for indexer types which can be rewritten as an `in`
+// expression; however, this would imply a rewrite of the inlined expression that may not be necessary
+// in most cases.
+func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool {
+ if inlinedType.IsAssignableType(NullType) ||
+ inlinedType.HasTrait(traits.SizerType) {
+ return true
+ }
+ for _, m := range matches {
+ if m.Kind() != ast.SelectKind {
+ continue
+ }
+ sel := m.AsSelect()
+ if sel.IsTestOnly() {
+ return false
+ }
+ }
+ return true
+}
+
+// matchVariable matches simple identifiers, select expressions, and presence test expressions
+// which match the (potentially) qualified variable name provided as input.
+//
+// Note, this function does not support inlining against select expressions which include optional
+// field selection. This may be a future refinement.
+func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher {
+ return func(e ast.NavigableExpr) bool {
+ if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
+ return true
+ }
+ if e.Kind() == ast.SelectKind {
+ sel := e.AsSelect()
+ // While the `ToQualifiedName` call could take the select directly, this
+ // would skip presence tests from possible matches, which we would like
+ // to include.
+ qualName, found := containers.ToQualifiedName(sel.Operand())
+ return found && qualName+"."+sel.FieldName() == varName
+ }
+ return false
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/io.go b/hack/tools/vendor/github.com/google/cel-go/cel/io.go
new file mode 100644
index 0000000000..7d08d1c813
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/io.go
@@ -0,0 +1,288 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/parser"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// CheckedExprToAst converts a checked expression proto message to an Ast.
+func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
+ checked, _ := CheckedExprToAstWithSource(checkedExpr, nil)
+ return checked
+}
+
+// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general the source is not necessary unless the AST has been modified between the
+// `Parse` and `Check` calls, as an `Ast` created from the `Parse` step will carry the source
+// through future calls.
+//
+// Prefer CheckedExprToAst if loading expressions from storage.
+func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
+ checked, err := ast.ToAST(checkedExpr)
+ if err != nil {
+ return nil, err
+ }
+ return &Ast{source: src, impl: checked}, nil
+}
+
+// AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
+//
+// If the Ast.IsChecked() returns false, this conversion method will return an error.
+func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
+ if !a.IsChecked() {
+ return nil, fmt.Errorf("cannot convert unchecked ast")
+ }
+ return ast.ToProto(a.impl)
+}
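+
+// A round-trip sketch for persisting checked expressions (proto marshaling and
+// storage mechanics elided; names illustrative):
+//
+//    checkedExpr, err := AstToCheckedExpr(checkedAST)
+//    // ... marshal, store, and later reload the proto ...
+//    restored := CheckedExprToAst(checkedExpr)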
+
+// ParsedExprToAst converts a parsed expression proto message to an Ast.
+func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
+ return ParsedExprToAstWithSource(parsedExpr, nil)
+}
+
+// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general you only need this if you need to recheck a previously checked
+// expression, or if you need to separately check a subset of an expression.
+//
+// Prefer ParsedExprToAst if loading expressions from storage.
+func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
+ info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo())
+ if src == nil {
+ src = common.NewInfoSource(parsedExpr.GetSourceInfo())
+ }
+ e, _ := ast.ProtoToExpr(parsedExpr.GetExpr())
+ return &Ast{source: src, impl: ast.NewAST(e, info)}
+}
+
+// AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
+func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
+ return &exprpb.ParsedExpr{
+ Expr: a.Expr(),
+ SourceInfo: a.SourceInfo(),
+ }, nil
+}
+
+// AstToString converts an Ast back to a string if possible.
+//
+// Note, the conversion may not be an exact replica of the original expression, but will produce
+// a string that is semantically equivalent and whose textual representation is stable.
+func AstToString(a *Ast) (string, error) {
+ return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo())
+}
+
+// RefValueToValue converts between ref.Val and api.expr.Value.
+// The result Value is the serialized proto form. The ref.Val must not be an error or unknown value.
+func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
+ return ValueAsAlphaProto(res)
+}
+
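+// ValueAsAlphaProto converts a ref.Val to a google.api.expr.v1alpha1.Value proto.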
+func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) {
+ canonical, err := ValueAsProto(res)
+ if err != nil {
+ return nil, err
+ }
+ alpha := &exprpb.Value{}
+ err = convertProto(canonical, alpha)
+ return alpha, err
+}
+
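+// ValueAsProto converts a ref.Val to a canonical cel.expr.Value proto.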
+func ValueAsProto(res ref.Val) (*celpb.Value, error) {
+ switch res.Type() {
+ case types.BoolType:
+ return &celpb.Value{
+ Kind: &celpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
+ case types.BytesType:
+ return &celpb.Value{
+ Kind: &celpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &celpb.Value{
+ Kind: &celpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
+ case types.IntType:
+ return &celpb.Value{
+ Kind: &celpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
+ case types.ListType:
+ l := res.(traits.Lister)
+ sz := l.Size().(types.Int)
+ elts := make([]*celpb.Value, 0, int64(sz))
+ for i := types.Int(0); i < sz; i++ {
+ v, err := ValueAsProto(l.Get(i))
+ if err != nil {
+ return nil, err
+ }
+ elts = append(elts, v)
+ }
+ return &celpb.Value{
+ Kind: &celpb.Value_ListValue{
+ ListValue: &celpb.ListValue{Values: elts}}}, nil
+ case types.MapType:
+ mapper := res.(traits.Mapper)
+ sz := mapper.Size().(types.Int)
+ entries := make([]*celpb.MapValue_Entry, 0, int64(sz))
+ for it := mapper.Iterator(); it.HasNext().(types.Bool); {
+ k := it.Next()
+ v := mapper.Get(k)
+ kv, err := ValueAsProto(k)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := ValueAsProto(v)
+ if err != nil {
+ return nil, err
+ }
+ entries = append(entries, &celpb.MapValue_Entry{Key: kv, Value: vv})
+ }
+ return &celpb.Value{
+ Kind: &celpb.Value_MapValue{
+ MapValue: &celpb.MapValue{Entries: entries}}}, nil
+ case types.NullType:
+ return &celpb.Value{
+ Kind: &celpb.Value_NullValue{}}, nil
+ case types.StringType:
+ return &celpb.Value{
+ Kind: &celpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
+ case types.TypeType:
+ typeName := res.(ref.Type).TypeName()
+ return &celpb.Value{Kind: &celpb.Value_TypeValue{TypeValue: typeName}}, nil
+ case types.UintType:
+ return &celpb.Value{
+ Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
+ default:
+ any, err := res.ConvertToNative(anyPbType)
+ if err != nil {
+ return nil, err
+ }
+ return &celpb.Value{
+ Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
+ }
+}
+
+var (
+ typeNameToTypeValue = map[string]ref.Val{
+ "bool": types.BoolType,
+ "bytes": types.BytesType,
+ "double": types.DoubleType,
+ "null_type": types.NullType,
+ "int": types.IntType,
+ "list": types.ListType,
+ "map": types.MapType,
+ "string": types.StringType,
+ "type": types.TypeType,
+ "uint": types.UintType,
+ }
+
+ anyPbType = reflect.TypeOf(&anypb.Any{})
+)
+
+// ValueToRefValue converts between exprpb.Value and ref.Val.
+func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
+ return AlphaProtoAsValue(adapter, v)
+}
+
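+// AlphaProtoAsValue converts a google.api.expr.v1alpha1.Value proto to a ref.Val.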
+func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
+ canonical := &celpb.Value{}
+ if err := convertProto(v, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsValue(adapter, canonical)
+}
+
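+// ProtoAsValue converts a canonical cel.expr.Value proto to a ref.Val.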
+func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) {
+ switch v.Kind.(type) {
+ case *celpb.Value_NullValue:
+ return types.NullValue, nil
+ case *celpb.Value_BoolValue:
+ return types.Bool(v.GetBoolValue()), nil
+ case *celpb.Value_Int64Value:
+ return types.Int(v.GetInt64Value()), nil
+ case *celpb.Value_Uint64Value:
+ return types.Uint(v.GetUint64Value()), nil
+ case *celpb.Value_DoubleValue:
+ return types.Double(v.GetDoubleValue()), nil
+ case *celpb.Value_StringValue:
+ return types.String(v.GetStringValue()), nil
+ case *celpb.Value_BytesValue:
+ return types.Bytes(v.GetBytesValue()), nil
+ case *celpb.Value_ObjectValue:
+ any := v.GetObjectValue()
+ msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true})
+ if err != nil {
+ return nil, err
+ }
+ return adapter.NativeToValue(msg), nil
+ case *celpb.Value_MapValue:
+ m := v.GetMapValue()
+ entries := make(map[ref.Val]ref.Val)
+ for _, entry := range m.Entries {
+ key, err := ProtoAsValue(adapter, entry.Key)
+ if err != nil {
+ return nil, err
+ }
+ pb, err := ProtoAsValue(adapter, entry.Value)
+ if err != nil {
+ return nil, err
+ }
+ entries[key] = pb
+ }
+ return adapter.NativeToValue(entries), nil
+ case *celpb.Value_ListValue:
+ l := v.GetListValue()
+ elts := make([]ref.Val, len(l.Values))
+ for i, e := range l.Values {
+ rv, err := ProtoAsValue(adapter, e)
+ if err != nil {
+ return nil, err
+ }
+ elts[i] = rv
+ }
+ return adapter.NativeToValue(elts), nil
+ case *celpb.Value_TypeValue:
+ typeName := v.GetTypeValue()
+ tv, ok := typeNameToTypeValue[typeName]
+ if ok {
+ return tv, nil
+ }
+ return types.NewObjectTypeValue(typeName), nil
+ }
+ return nil, errors.New("unknown value")
+}
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/library.go b/hack/tools/vendor/github.com/google/cel-go/cel/library.go
new file mode 100644
index 0000000000..be59f1b028
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/library.go
@@ -0,0 +1,790 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/stdlib"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+)
+
+const (
+ optMapMacro = "optMap"
+ optFlatMapMacro = "optFlatMap"
+ hasValueFunc = "hasValue"
+ optionalNoneFunc = "optional.none"
+ optionalOfFunc = "optional.of"
+ optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
+ valueFunc = "value"
+ unusedIterVar = "#unused"
+)
+
+// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
+// environment for a particular use case or with a related set of functionality.
+//
+// Note, the ProgramOption values provided by a library are expected to be static and not vary
+// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to
+// configure these options outside the Library and within the Env.Program() call directly.
+type Library interface {
+ // CompileOptions returns a collection of functional options for configuring the Parse / Check
+ // environment.
+ CompileOptions() []EnvOption
+
+ // ProgramOptions returns a collection of functional options which should be included in every
+ // Program generated from the Env.Program() call.
+ ProgramOptions() []ProgramOption
+}
+
+// SingletonLibrary refines the Library interface to ensure that libraries in this format are only
+// configured once within the environment.
+type SingletonLibrary interface {
+ Library
+
+ // LibraryName provides a namespaced name which is used to check whether the library has already
+ // been configured in the environment.
+ LibraryName() string
+}
+
+// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
+// and to be linked to each other.
+func Lib(l Library) EnvOption {
+ singleton, isSingleton := l.(SingletonLibrary)
+ return func(e *Env) (*Env, error) {
+ if isSingleton {
+ if e.HasLibrary(singleton.LibraryName()) {
+ return e, nil
+ }
+ e.libraries[singleton.LibraryName()] = true
+ }
+ var err error
+ for _, opt := range l.CompileOptions() {
+ e, err = opt(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.progOpts = append(e.progOpts, l.ProgramOptions()...)
+ return e, nil
+ }
+}
+
+// StdLib returns an EnvOption for the standard library of CEL functions and macros.
+func StdLib() EnvOption {
+ return Lib(stdLibrary{})
+}
+
+// stdLibrary implements the Library interface and provides functional options for the core CEL
+// features documented in the specification.
+type stdLibrary struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (stdLibrary) LibraryName() string {
+ return "cel.lib.std"
+}
+
+// CompileOptions returns options for the standard CEL function declarations and macros.
+func (stdLibrary) CompileOptions() []EnvOption {
+ return []EnvOption{
+ func(e *Env) (*Env, error) {
+ var err error
+ for _, fn := range stdlib.Functions() {
+ existing, found := e.functions[fn.Name()]
+ if found {
+ fn, err = existing.Merge(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.functions[fn.Name()] = fn
+ }
+ return e, nil
+ },
+ func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, stdlib.Types()...)
+ return e, nil
+ },
+ Macros(StandardMacros...),
+ }
+}
+
+// ProgramOptions returns function implementations for the standard CEL functions.
+func (stdLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{}
+}
+
+// OptionalTypes enables support for optional syntax and types in CEL.
+//
+// The optional value type makes it possible to express whether variables have
+// been provided, whether a result has been computed, and in the future whether
+// an object field path, map key value, or list index has a value.
+//
+// # Syntax Changes
+//
+// OptionalTypes are unlike other CEL extensions because they modify the CEL
+// syntax itself, notably through the use of a `?` preceding a field name or
+// index value.
+//
+// ## Field Selection
+//
+// The optional syntax in field selection is denoted as `obj.?field`. In other
+// words, if a field is set, return `optional.of(obj.field)`, else
+// `optional.none()`. The optional field selection is viral in the sense that
+// after the first optional selection all subsequent selections or indices
+// are treated as optional, i.e. the following expressions are equivalent:
+//
+// obj.?field.subfield
+// obj.?field.?subfield
+//
+// ## Indexing
+//
+// Similar to field selection, the optional syntax can be used in index
+// expressions on maps and lists:
+//
+// list[?0]
+// map[?key]
+//
+// ## Optional Field Setting
+//
+// When creating map or message literals, if a field may be optionally set
+// based on its presence, then placing a `?` before the field name or key
+// will ensure the type on the right-hand side must be optional(T) where T
+// is the type of the field or key-value.
+//
+// The following returns a map with the key expression set only if the
+// subfield is present, otherwise an empty map is created:
+//
+// {?key: obj.?field.subfield}
+//
+// ## Optional Element Setting
+//
+// When creating list literals, an element in the list may be optionally added
+// when the element expression is preceded by a `?`:
+//
+// [a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c]
+//
+// # Optional.Of
+//
+// Create an optional(T) value of a given value with type T.
+//
+// optional.of(10)
+//
+// # Optional.OfNonZeroValue
+//
+// Create an optional(T) value of a given value with type T if it is not a
+// zero-value. A zero-value is the default empty value for any given CEL type,
+// including empty protobuf message types. If the value is empty, the result
+// of this call will be optional.none().
+//
+// optional.ofNonZeroValue([1, 2, 3]) // optional(list(int))
+// optional.ofNonZeroValue([]) // optional.none()
+// optional.ofNonZeroValue(0) // optional.none()
+// optional.ofNonZeroValue("") // optional.none()
+//
+// # Optional.None
+//
+// Create an empty optional value.
+//
+// # HasValue
+//
+// Determine whether the optional contains a value.
+//
+// optional.of(b'hello').hasValue() // true
+// optional.ofNonZeroValue({}).hasValue() // false
+//
+// # Value
+//
+// Get the value contained by the optional. If the optional does not have a
+// value, the result will be a CEL error.
+//
+// optional.of(b'hello').value() // b'hello'
+// optional.ofNonZeroValue({}).value() // error
+//
+// # Or
+//
+// If the value on the left-hand side is optional.none(), the optional value
+// on the right-hand side is returned. If the value on the left-hand side is
+// valued, then it is returned. This operation is short-circuiting and will
+// only evaluate as many links in the `or` chain as are needed to return a
+// non-empty optional value.
+//
+// obj.?field.or(m[?key])
+// l[?index].or(obj.?field.subfield).or(obj.?other)
+//
+// # OrValue
+//
+// Either return the value contained within the optional on the left-hand side
+// or return the alternative value on the right hand side.
+//
+// m[?key].orValue("none")
+//
+// # OptMap
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return an optional typed result based on the transformation. The
+// transformation expression type must return a type T which is wrapped into
+// an optional.
+//
+// msg.?elements.optMap(e, e.size()).orValue(0)
+//
+// # OptFlatMap
+//
+// Introduced in version: 1
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return the result. The transform expression must return an optional(T)
+// rather than type T. This can be useful when dealing with zero values and
+// conditionally generating an empty or non-empty result in ways which cannot
+// be expressed with `optMap`.
+//
+// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
+func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
+ lib := &optionalLib{version: math.MaxUint32}
+ for _, opt := range opts {
+ lib = opt(lib)
+ }
+ return Lib(lib)
+}
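+
+// A minimal environment sketch enabling the optional syntax (the expression
+// shown is illustrative):
+//
+//    env, err := NewEnv(OptionalTypes())
+//    ast, iss := env.Compile(`optional.of(10).orValue(0)`)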
+
+type optionalLib struct {
+ version uint32
+}
+
+// OptionalTypesOption is a functional interface for configuring the optional types library.
+type OptionalTypesOption func(*optionalLib) *optionalLib
+
+// OptionalTypesVersion configures the version of the optional type library.
+//
+// The version limits which functions are available. Only functions introduced
+// at or below the given version are included in the library. If this option
+// is not set, all functions are available.
+//
+// See the library documentation to determine which version a function was introduced.
+// If the documentation does not state which version a function was introduced, it can
+// be assumed to be introduced at version 0, when the library was first created.
+func OptionalTypesVersion(version uint32) OptionalTypesOption {
+ return func(lib *optionalLib) *optionalLib {
+ lib.version = version
+ return lib
+ }
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (lib *optionalLib) LibraryName() string {
+ return "cel.lib.optional"
+}
+
+// CompileOptions implements the Library interface method.
+func (lib *optionalLib) CompileOptions() []EnvOption {
+ paramTypeK := TypeParamType("K")
+ paramTypeV := TypeParamType("V")
+ optionalTypeV := OptionalType(paramTypeV)
+ listTypeV := ListType(paramTypeV)
+ mapTypeKV := MapType(paramTypeK, paramTypeV)
+
+ opts := []EnvOption{
+ // Enable the optional syntax in the parser.
+ enableOptionalSyntax(),
+
+ // Introduce the optional type.
+ Types(types.OptionalType),
+
+ // Configure the optMap and optFlatMap macros.
+ Macros(ReceiverMacro(optMapMacro, 2, optMap)),
+
+ // Global and member functions for working with optional values.
+ Function(optionalOfFunc,
+ Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ return types.OptionalOf(value)
+ }))),
+ Function(optionalOfNonZeroValueFunc,
+ Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ v, isZeroer := value.(traits.Zeroer)
+ if !isZeroer || !v.IsZeroValue() {
+ return types.OptionalOf(value)
+ }
+ return types.OptionalNone
+ }))),
+ Function(optionalNoneFunc,
+ Overload("optional_none", []*Type{}, optionalTypeV,
+ FunctionBinding(func(values ...ref.Val) ref.Val {
+ return types.OptionalNone
+ }))),
+ Function(valueFunc,
+ MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return opt.GetValue()
+ }))),
+ Function(hasValueFunc,
+ MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return types.Bool(opt.HasValue())
+ }))),
+
+ // Implementations of 'or' and 'orValue' are special-cased to support short-circuiting in the
+ // evaluation chain.
+ Function("or",
+ MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+ Function("orValue",
+ MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+
+ // OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
+ // output type.
+ Function(operators.OptSelect,
+ Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+
+ // OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
+ // these signatures to determine type-agreement without any special handling.
+ Function(operators.OptIndex,
+ Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+ Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+ Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+
+ // Index overloads to accommodate using an optional value as the operand.
+ Function(operators.Index,
+ Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+ }
+ if lib.version >= 1 {
+ opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
+ }
+ return opts
+}
+
+// ProgramOptions implements the Library interface method.
+func (lib *optionalLib) ProgramOptions() []ProgramOption {
+ return []ProgramOption{
+ CustomDecorator(decorateOptionalOr),
+ }
+}
+
+func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.Kind() {
+ case ast.IdentKind:
+ varName = varIdent.AsIdent()
+ default:
+ return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier")
+ }
+ mapExpr := args[1]
+ return meh.NewCall(
+ operators.Conditional,
+ meh.NewMemberCall(hasValueFunc, target),
+ meh.NewCall(optionalOfFunc,
+ meh.NewComprehension(
+ meh.NewList(),
+ unusedIterVar,
+ varName,
+ meh.NewMemberCall(valueFunc, meh.Copy(target)),
+ meh.NewLiteral(types.False),
+ meh.NewIdent(varName),
+ mapExpr,
+ ),
+ ),
+ meh.NewCall(optionalNoneFunc),
+ ), nil
+}
+
+func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.Kind() {
+ case ast.IdentKind:
+ varName = varIdent.AsIdent()
+ default:
+ return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier")
+ }
+ mapExpr := args[1]
+ return meh.NewCall(
+ operators.Conditional,
+ meh.NewMemberCall(hasValueFunc, target),
+ meh.NewComprehension(
+ meh.NewList(),
+ unusedIterVar,
+ varName,
+ meh.NewMemberCall(valueFunc, meh.Copy(target)),
+ meh.NewLiteral(types.False),
+ meh.NewIdent(varName),
+ mapExpr,
+ ),
+ meh.NewCall(optionalNoneFunc),
+ ), nil
+}
+
+func enableOptionalSyntax() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
+ return e, nil
+ }
+}
+
+// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field
+// selection is performed on a primitive type.
+func EnableErrorOnBadPresenceTest(value bool) EnvOption {
+ return features(featureEnableErrorOnBadPresenceTest, value)
+}
+
+func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
+ call, ok := i.(interpreter.InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return i, nil
+ }
+ switch call.Function() {
+ case "or":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" {
+ return i, nil
+ }
+ return &evalOptionalOr{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ case "orValue":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" {
+ return i, nil
+ }
+ return &evalOptionalOrValue{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ default:
+ return i, nil
+ }
+}
+
+// evalOptionalOr selects between two optional values: the first is returned if it has a value,
+// otherwise the second optional expression is evaluated and returned.
+type evalOptionalOr struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOr) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+// evalOptionalOrValue selects between an optional or a concrete value. If the optional has a value,
+// its value is returned, otherwise the alternative value expression is evaluated and returned.
+type evalOptionalOrValue struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOrValue) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal.GetValue()
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+type timeUTCLibrary struct{}
+
+func (timeUTCLibrary) CompileOptions() []EnvOption {
+ return timeOverloadDeclarations
+}
+
+func (timeUTCLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{}
+}
+
+// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified
+// in the CEL expression.
+var (
+ utcTZ = types.String("UTC")
+
+ timeOverloadDeclarations = []EnvOption{
+ Function(overloads.TimeGetHours,
+ MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetHours))),
+ Function(overloads.TimeGetMinutes,
+ MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetMinutes))),
+ Function(overloads.TimeGetSeconds,
+ MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetSeconds))),
+ Function(overloads.TimeGetMilliseconds,
+ MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetMilliseconds))),
+ Function(overloads.TimeGetFullYear,
+ MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetFullYear(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetFullYear),
+ ),
+ ),
+ Function(overloads.TimeGetMonth,
+ MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMonth(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMonth),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfYear,
+ MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfYear(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(func(ts, tz ref.Val) ref.Val {
+ return timestampGetDayOfYear(ts, tz)
+ }),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfMonth,
+ MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthZeroBased(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfMonthZeroBased),
+ ),
+ ),
+ Function(overloads.TimeGetDate,
+ MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthOneBased(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfMonthOneBased),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfWeek,
+ MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfWeek(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfWeek),
+ ),
+ ),
+ Function(overloads.TimeGetHours,
+ MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetHours(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetHours),
+ ),
+ ),
+ Function(overloads.TimeGetMinutes,
+ MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMinutes(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMinutes),
+ ),
+ ),
+ Function(overloads.TimeGetSeconds,
+ MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetSeconds(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetSeconds),
+ ),
+ ),
+ Function(overloads.TimeGetMilliseconds,
+ MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMilliseconds(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMilliseconds),
+ ),
+ ),
+ }
+)
+
+func timestampGetFullYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Year())
+}
+
+func timestampGetMonth(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
+ return types.Int(t.Month() - 1)
+}
+
+func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.YearDay() - 1)
+}
+
+func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Day() - 1)
+}
+
+func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Day())
+}
+
+func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Weekday())
+}
+
+func timestampGetHours(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Hour())
+}
+
+func timestampGetMinutes(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Minute())
+}
+
+func timestampGetSeconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Second())
+}
+
+func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Nanosecond() / 1000000)
+}
+
+func inTimeZone(ts, tz ref.Val) (time.Time, error) {
+ t := ts.(types.Timestamp)
+ val := string(tz.(types.String))
+ ind := strings.Index(val, ":")
+ if ind == -1 {
+ loc, err := time.LoadLocation(val)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return t.In(loc), nil
+ }
+
+ // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
+ // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
+ hr, err := strconv.Atoi(string(val[0:ind]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ min, err := strconv.Atoi(string(val[ind+1:]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ var offset int
+ if string(val[0]) == "-" {
+ offset = hr*60 - min
+ } else {
+ offset = hr*60 + min
+ }
+ secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
+ timezone := time.FixedZone("", secondsEastOfUTC)
+ return t.In(timezone), nil
+}
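+
+// For example, both forms below are accepted by the helper above (values
+// illustrative):
+//
+//    inTimeZone(ts, types.String("US/Central")) // IANA time zone name
+//    inTimeZone(ts, types.String("-05:30"))     // fixed offset from UTC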
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/macro.go b/hack/tools/vendor/github.com/google/cel-go/cel/macro.go
new file mode 100644
index 0000000000..4db1fd57a9
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/macro.go
@@ -0,0 +1,576 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Macro describes a function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count or as a var arg macro.
+type Macro = parser.Macro
+
+// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value.
+type MacroFactory = parser.MacroExpander
+
+// MacroExprFactory assists with the creation of Expr values in a manner which is consistent
+// with the internal semantics and id generation behaviors of the parser and checker libraries.
+type MacroExprFactory = parser.ExprHelper
+
+// MacroExpander converts a call and its associated arguments into a protobuf Expr representation.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
+type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error)
+
+// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
+type MacroExprHelper interface {
+ // Copy the input expression with a brand new set of identifiers.
+ Copy(*exprpb.Expr) *exprpb.Expr
+
+ // LiteralBool creates an Expr value for a bool literal.
+ LiteralBool(value bool) *exprpb.Expr
+
+ // LiteralBytes creates an Expr value for a byte literal.
+ LiteralBytes(value []byte) *exprpb.Expr
+
+ // LiteralDouble creates an Expr value for a double literal.
+ LiteralDouble(value float64) *exprpb.Expr
+
+ // LiteralInt creates an Expr value for an int literal.
+ LiteralInt(value int64) *exprpb.Expr
+
+ // LiteralString creates an Expr value for a string literal.
+ LiteralString(value string) *exprpb.Expr
+
+ // LiteralUint creates an Expr value for a uint literal.
+ LiteralUint(value uint64) *exprpb.Expr
+
+ // NewList creates a CreateList instruction where the list is comprised of the optional set
+ // of elements provided as arguments.
+ NewList(elems ...*exprpb.Expr) *exprpb.Expr
+
+ // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+ // optional set of key, value entries.
+ NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewMapEntry creates a Map Entry for the key, value pair.
+ NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // NewObject creates a CreateStruct instruction for an object with a given type name and
+ // optional set of field initializers.
+ NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewObjectFieldInit creates a new Object field initializer from the field name and value.
+ NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // Fold creates a fold comprehension instruction.
+ //
+ // - iterVar is the iteration variable name.
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr
+
+ // Ident creates an identifier Expr value.
+ Ident(name string) *exprpb.Expr
+
+ // AccuIdent returns an accumulator identifier for use with comprehension results.
+ AccuIdent() *exprpb.Expr
+
+ // GlobalCall creates a function call Expr value for a global (free) function.
+ GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
+
+ // ReceiverCall creates a function call Expr value for a receiver-style function.
+ ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
+
+ // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // Select creates a field traversal Expr value.
+ Select(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // OffsetLocation returns the Location of the expression identifier.
+ OffsetLocation(exprID int64) common.Location
+
+ // NewError associates an error message with a given expression id.
+ NewError(exprID int64, message string) *Error
+}
+
+// GlobalMacro creates a Macro for a global function with the specified arg count.
+func GlobalMacro(function string, argCount int, factory MacroFactory) Macro {
+ return parser.NewGlobalMacro(function, argCount, factory)
+}
+
+// ReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro {
+ return parser.NewReceiverMacro(function, argCount, factory)
+}
+
+// GlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func GlobalVarArgMacro(function string, factory MacroFactory) Macro {
+ return parser.NewGlobalVarArgMacro(function, factory)
+}
+
+// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func ReceiverVarArgMacro(function string, factory MacroFactory) Macro {
+ return parser.NewReceiverVarArgMacro(function, factory)
+}
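+
+// twiceMacro is an illustrative sketch, not part of the upstream source: a
+// hypothetical global macro "twice(x)" that expands to x + x using the
+// modern MacroFactory signature.
+var twiceMacro = GlobalMacro("twice", 1,
+ func(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+  // Expand twice(x) into the call _+_(x, x), copying the argument so the
+  // two operands receive distinct expression ids.
+  return eh.NewCall("_+_", args[0], eh.Copy(args[0])), nil
+ })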
+
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+//
+// Deprecated: use GlobalMacro
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewGlobalMacro(function, argCount, expand.Expander)
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+//
+// Deprecated: use ReceiverMacro
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewReceiverMacro(function, argCount, expand.Expander)
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+//
+// Deprecated: use GlobalVarArgMacro
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewGlobalVarArgMacro(function, expand.Expander)
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+//
+// Deprecated: use ReceiverVarArgMacro
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewReceiverVarArgMacro(function, expand.Expander)
+}
+
+// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
+func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ arg, err := adaptToExpr(args[0])
+ if err != nil {
+ return nil, err
+ }
+ if arg.Kind() == ast.SelectKind {
+ s := arg.AsSelect()
+ return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName()))
+ }
+ return nil, ph.NewError(arg.ID(), "invalid argument to has() macro")
+}
+
+// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expressions:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range match the predicate expressions:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
+func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+// <iterRange>.map(<iterVar>, <transform>)
+// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form, only iterVar values which return true when provided to the predicate expression
+// are transformed.
+func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
+func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+var (
+ // Aliases to each macro in the CEL standard environment.
+ // Note: reassigning these macro variables may result in undefined behavior.
+
+ // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+ // specify the field as a string.
+ HasMacro = parser.HasMacro
+
+ // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+ // elements in the range satisfy the predicate.
+ AllMacro = parser.AllMacro
+
+ // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+ // some element in the range satisfies the predicate.
+ ExistsMacro = parser.ExistsMacro
+
+ // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+ // element in range the predicate holds.
+ ExistsOneMacro = parser.ExistsOneMacro
+
+ // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+ // to each element in the range to produce a new list.
+ MapMacro = parser.MapMacro
+
+ // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+ // first filters the elements in the range by the predicate, then applies the transform function
+ // to produce a new list.
+ MapFilterMacro = parser.MapFilterMacro
+
+ // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+ // elements in the range, producing a new list from the elements that satisfy the predicate.
+ FilterMacro = parser.FilterMacro
+
+ // StandardMacros provides an alias to all the CEL macros defined in the standard environment.
+ StandardMacros = []Macro{
+ HasMacro, AllMacro, ExistsMacro, ExistsOneMacro, MapMacro, MapFilterMacro, FilterMacro,
+ }
+
+ // NoMacros provides an alias to an empty list of macros
+ NoMacros = []Macro{}
+)
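+
+// Illustrative sketch, not part of the upstream source: an environment may be
+// limited to a subset of the standard macros, for example only has() and
+// filter():
+//
+// env, err := NewEnv(ClearMacros(), Macros(HasMacro, FilterMacro))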
+
+type adaptingExpander struct {
+ legacyExpander MacroExpander
+}
+
+func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ var legacyTarget *exprpb.Expr = nil
+ var err *Error = nil
+ if target != nil {
+ legacyTarget, err = adaptToProto(target)
+ if err != nil {
+ return nil, err
+ }
+ }
+ legacyArgs := make([]*exprpb.Expr, len(args))
+ for i, arg := range args {
+ legacyArgs[i], err = adaptToProto(arg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ ah := &adaptingHelper{modernHelper: eh}
+ legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs)
+ if err != nil {
+ return nil, err
+ }
+ ex, err := adaptToExpr(legacyExpr)
+ if err != nil {
+ return nil, err
+ }
+ return ex, nil
+}
+
+func wrapErr(id int64, message string, err error) *common.Error {
+ return &common.Error{
+ Location: common.NoLocation,
+ Message: fmt.Sprintf("%s: %v", message, err),
+ ExprID: id,
+ }
+}
+
+type adaptingHelper struct {
+ modernHelper parser.ExprHelper
+}
+
+// Copy the input expression with a brand new set of identifiers.
+func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e)))
+}
+
+// LiteralBool creates an Expr value for a bool literal.
+func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value)))
+}
+
+// LiteralBytes creates an Expr value for a byte literal.
+func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value)))
+}
+
+// LiteralDouble creates an Expr value for a double literal.
+func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value)))
+}
+
+// LiteralInt creates an Expr value for an int literal.
+func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value)))
+}
+
+// LiteralString creates an Expr value for a string literal.
+func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value)))
+}
+
+// LiteralUint creates an Expr value for a uint literal.
+func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value)))
+}
+
+// NewList creates a CreateList instruction where the list is comprised of the optional set
+// of elements provided as arguments.
+func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...))
+}
+
+// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+// optional set of key, value entries.
+func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ adaptedEntries := make([]ast.EntryExpr, len(entries))
+ for i, e := range entries {
+ adaptedEntries[i] = mustAdaptToEntryExpr(e)
+ }
+ return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...))
+}
+
+// NewMapEntry creates a Map Entry for the key, value pair.
+func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return mustAdaptToProtoEntry(
+ ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional))
+}
+
+// NewObject creates a CreateStruct instruction for an object with a given type name and
+// optional set of field initializers.
+func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ adaptedEntries := make([]ast.EntryExpr, len(fieldInits))
+ for i, e := range fieldInits {
+ adaptedEntries[i] = mustAdaptToEntryExpr(e)
+ }
+ return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...))
+}
+
+// NewObjectFieldInit creates a new Object field initializer from the field name and value.
+func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return mustAdaptToProtoEntry(
+ ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional))
+}
+
+// Fold creates a fold comprehension instruction.
+//
+// - iterVar is the iteration variable name.
+// - iterRange represents the expression that resolves to a list or map where the elements or
+// keys (respectively) will be iterated over.
+// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+// - accuInit is the initial expression whose value will be set for the accuVar prior to
+// folding.
+// - condition is the expression to test to determine whether to continue folding.
+// - step is the expression to evaluate at the conclusion of a single fold iteration.
+// - result is the computation to evaluate at the conclusion of the fold.
+//
+// The accuVar should not shadow variable names that you would like to reference within the
+// environment in the step and condition expressions. Presently, the name __result__ is commonly
+// used by built-in macros but this may change in the future.
+func (ah *adaptingHelper) Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(
+ ah.modernHelper.NewComprehension(
+ mustAdaptToExpr(iterRange),
+ iterVar,
+ accuVar,
+ mustAdaptToExpr(accuInit),
+ mustAdaptToExpr(condition),
+ mustAdaptToExpr(step),
+ mustAdaptToExpr(result),
+ ),
+ )
+}
+
+// Ident creates an identifier Expr value.
+func (ah *adaptingHelper) Ident(name string) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewIdent(name))
+}
+
+// AccuIdent returns an accumulator identifier for use with comprehension results.
+func (ah *adaptingHelper) AccuIdent() *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewAccuIdent())
+}
+
+// GlobalCall creates a function call Expr value for a global (free) function.
+func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...))
+}
+
+// ReceiverCall creates a function call Expr value for a receiver-style function.
+func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(
+ ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...))
+}
+
+// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
+ op := mustAdaptToExpr(operand)
+ return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field))
+}
+
+// Select creates a field traversal Expr value.
+func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
+ op := mustAdaptToExpr(operand)
+ return mustAdaptToProto(ah.modernHelper.NewSelect(op, field))
+}
+
+// OffsetLocation returns the Location of the expression identifier.
+func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location {
+ return ah.modernHelper.OffsetLocation(exprID)
+}
+
+// NewError associates an error message with a given expression id.
+func (ah *adaptingHelper) NewError(exprID int64, message string) *Error {
+ return ah.modernHelper.NewError(exprID, message)
+}
+
+func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr {
+ adapted := make([]ast.Expr, len(exprs))
+ for i, e := range exprs {
+ adapted[i] = mustAdaptToExpr(e)
+ }
+ return adapted
+}
+
+func mustAdaptToExpr(e *exprpb.Expr) ast.Expr {
+ out, _ := adaptToExpr(e)
+ return out
+}
+
+func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) {
+ if e == nil {
+ return nil, nil
+ }
+ out, err := ast.ProtoToExpr(e)
+ if err != nil {
+ return nil, wrapErr(e.GetId(), "proto conversion failure", err)
+ }
+ return out, nil
+}
+
+func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr {
+ out, _ := ast.ProtoToEntryExpr(e)
+ return out
+}
+
+func mustAdaptToProto(e ast.Expr) *exprpb.Expr {
+ out, _ := adaptToProto(e)
+ return out
+}
+
+func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) {
+ if e == nil {
+ return nil, nil
+ }
+ out, err := ast.ExprToProto(e)
+ if err != nil {
+ return nil, wrapErr(e.ID(), "expr conversion failure", err)
+ }
+ return out, nil
+}
+
+func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry {
+ out, _ := ast.EntryExprToProto(e)
+ return out
+}
+
+func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) {
+ ah, ok := meh.(*adaptingHelper)
+ if !ok {
+ return nil, common.NewError(0,
+ fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh),
+ common.NoLocation)
+ }
+ return ah.modernHelper, nil
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/optimizer.go b/hack/tools/vendor/github.com/google/cel-go/cel/optimizer.go
new file mode 100644
index 0000000000..c149abb703
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/optimizer.go
@@ -0,0 +1,535 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "sort"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order.
+//
+// The static optimizer normalizes expression ids and re-runs type-checking between optimization
+// passes to ensure that the final optimized output is a valid expression with metadata consistent
+// with what would have been generated from a parsed and checked expression.
+//
+// Note: source position information is best-effort and likely wrong, but optimized expressions
+// should be suitable for calls to parser.Unparse.
+type StaticOptimizer struct {
+ optimizers []ASTOptimizer
+}
+
+// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied
+// to a checked expression.
+func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
+ return &StaticOptimizer{
+ optimizers: optimizers,
+ }
+}
+
+// Optimize applies a sequence of optimizations to an Ast within a given environment.
+//
+// If issues are encountered, the Issues.Err() return value will be non-nil.
+func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
+ // Make a copy of the AST to be optimized.
+ optimized := ast.Copy(a.impl)
+ ids := newIDGenerator(ast.MaxID(a.impl))
+
+ // Create the optimizer context, could be pooled in the future.
+ issues := NewIssues(common.NewErrors(a.Source()))
+ baseFac := ast.NewExprFactory()
+ exprFac := &optimizerExprFactory{
+ idGenerator: ids,
+ fac: baseFac,
+ sourceInfo: optimized.SourceInfo(),
+ }
+ ctx := &OptimizerContext{
+ optimizerExprFactory: exprFac,
+ Env: env,
+ Issues: issues,
+ }
+
+ // Apply the optimizations sequentially.
+ for _, o := range opt.optimizers {
+ optimized = o.Optimize(ctx, optimized)
+ if issues.Err() != nil {
+ return nil, issues
+ }
+ // Normalize expression id metadata including coordination with macro call metadata.
+ freshIDGen := newIDGenerator(0)
+ info := optimized.SourceInfo()
+ expr := optimized.Expr()
+ normalizeIDs(freshIDGen.renumberStable, expr, info)
+ cleanupMacroRefs(expr, info)
+
+ // Recheck the updated expression for any possible type-agreement or validation errors.
+ parsed := &Ast{
+ source: a.Source(),
+ impl: ast.NewAST(expr, info)}
+ checked, iss := ctx.Check(parsed)
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ optimized = checked.impl
+ }
+
+ // Return the optimized result.
+ return &Ast{
+ source: a.Source(),
+ impl: optimized,
+ }, nil
+}
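+
+// Illustrative sketch, not part of the upstream source: applying a static
+// optimizer to a checked Ast, where myOptimizer stands in for any
+// ASTOptimizer implementation:
+//
+// opt := NewStaticOptimizer(myOptimizer{})
+// optimized, iss := opt.Optimize(env, checkedAst)
+// if iss != nil && iss.Err() != nil { /* handle optimization errors */ }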
+
+// normalizeIDs ensures that the metadata present with an AST is reset in a manner such
+// that the ids within the expression correspond to the ids within macros.
+func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) {
+ optimized.RenumberIDs(idGen)
+ if len(info.MacroCalls()) == 0 {
+ return
+ }
+
+ // Sort the macro ids to make sure that the renumbering of macro-specific variables
+ // is stable across normalization calls.
+ sortedMacroIDs := []int64{}
+ for id := range info.MacroCalls() {
+ sortedMacroIDs = append(sortedMacroIDs, id)
+ }
+ sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] })
+
+ // First, update the macro call ids themselves.
+ callIDMap := map[int64]int64{}
+ for _, id := range sortedMacroIDs {
+ callIDMap[id] = idGen(id)
+ }
+ // Then update the macro call definitions which refer to these ids, but
+ // ensure that the updates don't collide and remove macro entries which haven't
+ // been visited / updated yet.
+ type macroUpdate struct {
+ id int64
+ call ast.Expr
+ }
+ macroUpdates := []macroUpdate{}
+ for _, oldID := range sortedMacroIDs {
+ newID := callIDMap[oldID]
+ call, found := info.GetMacroCall(oldID)
+ if !found {
+ continue
+ }
+ call.RenumberIDs(idGen)
+ macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call})
+ info.ClearMacroCall(oldID)
+ }
+ for _, u := range macroUpdates {
+ info.SetMacroCall(u.id, u.call)
+ }
+}
+
+func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) {
+ if len(info.MacroCalls()) == 0 {
+ return
+ }
+
+ // Sanitize the macro call references once the optimized expression has been computed
+ // and the ids normalized between the expression and the macros.
+ exprRefMap := make(map[int64]struct{})
+ ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.ID() == 0 {
+ return
+ }
+ exprRefMap[e.ID()] = struct{}{}
+ }))
+ // Update the macro call id references to ensure that macro pointers are
+ // updated consistently across macros.
+ for _, call := range info.MacroCalls() {
+ ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.ID() == 0 {
+ return
+ }
+ exprRefMap[e.ID()] = struct{}{}
+ }))
+ }
+ for id := range info.MacroCalls() {
+ if _, found := exprRefMap[id]; !found {
+ info.ClearMacroCall(id)
+ }
+ }
+}
+
+// newIDGenerator ensures that new ids are only created the first time they are encountered.
+func newIDGenerator(seed int64) *idGenerator {
+ return &idGenerator{
+ idMap: make(map[int64]int64),
+ seed: seed,
+ }
+}
+
+type idGenerator struct {
+ idMap map[int64]int64
+ seed int64
+}
+
+func (gen *idGenerator) nextID() int64 {
+ gen.seed++
+ return gen.seed
+}
+
+func (gen *idGenerator) renumberStable(id int64) int64 {
+ if id == 0 {
+ return 0
+ }
+ if newID, found := gen.idMap[id]; found {
+ return newID
+ }
+ nextID := gen.nextID()
+ gen.idMap[id] = nextID
+ return nextID
+}
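+
+// Illustrative sketch, not part of the upstream source: renumberStable maps
+// each distinct non-zero id to the next sequential id exactly once, so
+// repeated calls with the same id return the same value:
+//
+// gen := newIDGenerator(10)
+// gen.renumberStable(42) // 11
+// gen.renumberStable(7)  // 12
+// gen.renumberStable(42) // 11 again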
+
+// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate
+// subexpressions and report any errors encountered along the way. The context also embeds the
+// optimizerExprFactory which can be used to generate new sub-expressions with expression ids
+// consistent with the expectations of a parsed expression.
+type OptimizerContext struct {
+ *Env
+ *optimizerExprFactory
+ *Issues
+}
+
+// ExtendEnv augments the context's environment with the additional options.
+func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error {
+ e, err := opt.Env.Extend(opts...)
+ if err != nil {
+ return err
+ }
+ opt.Env = e
+ return nil
+}
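+
+// Illustrative sketch, not part of the upstream source: an ASTOptimizer can
+// use ExtendEnv to declare names it introduces; the variable "x" is an
+// assumed example.
+//
+// func (o myOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+// 	if err := ctx.ExtendEnv(Variable("x", IntType)); err != nil {
+// 		return a // leave the AST unchanged if the environment cannot be extended
+// 	}
+// 	// ... rewrite the AST against the extended environment ...
+// 	return a
+// }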
+
+// ASTOptimizer applies an optimization over an AST and returns the optimized result.
+type ASTOptimizer interface {
+ // Optimize optimizes a type-checked AST within an Environment and accumulates any issues.
+ Optimize(*OptimizerContext, *ast.AST) *ast.AST
+}
+
+type optimizerExprFactory struct {
+ *idGenerator
+ fac ast.ExprFactory
+ sourceInfo *ast.SourceInfo
+}
+
+// NewAST creates an AST from the current expression using the tracked source info which
+// is modified and managed by the OptimizerContext.
+func (opt *optimizerExprFactory) NewAST(expr ast.Expr) *ast.AST {
+ return ast.NewAST(expr, opt.sourceInfo)
+}
+
+// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the
+// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions
+// between copies.
+//
+// Use this method before attempting to merge the expression from AST into another.
+func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) {
+ idGen := newIDGenerator(opt.nextID())
+ defer func() { opt.seed = idGen.nextID() }()
+ copyExpr := opt.fac.CopyExpr(a.Expr())
+ copyInfo := ast.CopySourceInfo(a.SourceInfo())
+ normalizeIDs(idGen.renumberStable, copyExpr, copyInfo)
+ return copyExpr, copyInfo
+}
+
+// CopyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being
+// optimized.
+func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr {
+ copyExpr, copyInfo := opt.CopyAST(a)
+ for macroID, call := range copyInfo.MacroCalls() {
+ opt.SetMacroCall(macroID, call)
+ }
+ return copyExpr
+}
+
+// ClearMacroCall clears the macro at the given expression id.
+func (opt *optimizerExprFactory) ClearMacroCall(id int64) {
+ opt.sourceInfo.ClearMacroCall(id)
+}
+
+// SetMacroCall sets the macro call metadata for the given macro id within the tracked source info
+// metadata.
+func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) {
+ opt.sourceInfo.SetMacroCall(id, expr)
+}
+
+// MacroCalls returns the map of macro calls currently in the context.
+func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr {
+ return opt.sourceInfo.MacroCalls()
+}
+
+// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression
+// representing the unexpanded call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) {
+ varID := opt.nextID()
+ remainingID := opt.nextID()
+ remaining = opt.fac.CopyExpr(remaining)
+ remaining.RenumberIDs(func(id int64) int64 {
+ if id == macroID {
+ return remainingID
+ }
+ return id
+ })
+ if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists {
+ opt.SetMacroCall(remainingID, opt.fac.CopyExpr(call))
+ }
+
+ astExpr = opt.fac.NewComprehension(macroID,
+ opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}),
+ "#unused",
+ varName,
+ opt.fac.CopyExpr(varInit),
+ opt.fac.NewLiteral(opt.nextID(), types.False),
+ opt.fac.NewIdent(varID, varName),
+ remaining)
+
+ macroExpr = opt.fac.NewMemberCall(0, "bind",
+ opt.fac.NewIdent(opt.nextID(), "cel"),
+ opt.fac.NewIdent(varID, varName),
+ opt.fac.CopyExpr(varInit),
+ opt.fac.CopyExpr(remaining))
+ opt.sanitizeMacro(macroID, macroExpr)
+ return
+}
+
+// NewCall creates a global function call invocation expression.
+//
+// Example:
+//
+// countByField(list, fieldName)
+// - function: countByField
+// - args: [list, fieldName]
+func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr {
+ return opt.fac.NewCall(opt.nextID(), function, args...)
+}
+
+// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call.
+//
+// Example:
+//
+// list.countByField(fieldName)
+// - function: countByField
+// - target: list
+// - args: [fieldName]
+func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+ return opt.fac.NewMemberCall(opt.nextID(), function, target, args...)
+}
+
+// NewIdent creates a new identifier expression.
+//
+// Examples:
+//
+// - simple_var_name
+// - qualified.subpackage.var_name
+func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr {
+ return opt.fac.NewIdent(opt.nextID(), name)
+}
+
+// NewLiteral creates a new literal expression value.
+//
+// The range of valid values for a literal generated during optimization is different than for expressions
+// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can
+// be converted back to a literal-like form.
+func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr {
+ return opt.fac.NewLiteral(opt.nextID(), value)
+}
+
+// NewList creates a list expression with a set of optional indices.
+//
+// Examples:
+//
+// [a, b]
+// - elems: [a, b]
+// - optIndices: []
+//
+// [a, ?b, ?c]
+// - elems: [a, b, c]
+// - optIndices: [1, 2]
+func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr {
+ return opt.fac.NewList(opt.nextID(), elems, optIndices)
+}
+
+// NewMap creates a map from a set of entry expressions which contain a key and value expression.
+func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr {
+ return opt.fac.NewMap(opt.nextID(), entries)
+}
+
+// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the
+// entry is optional.
+//
+// Examples:
+//
+// {a: b}
+// - key: a
+// - value: b
+// - optional: false
+//
+// {?a: ?b}
+// - key: a
+// - value: b
+// - optional: true
+func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr {
+ return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional)
+}
+
+// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded
+// has() macro call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) {
+ sel := s.AsSelect()
+ astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName())
+ macroExpr = opt.fac.NewCall(0, "has",
+ opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName()))
+ opt.sanitizeMacro(macroID, macroExpr)
+ return
+}
+
+// NewSelect creates a select expression where a field value is selected from an operand.
+//
+// Example:
+//
+// msg.field_name
+// - operand: msg
+// - field: field_name
+func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr {
+ return opt.fac.NewSelect(opt.nextID(), operand, field)
+}
+
+// NewStruct creates a new typed struct value with a set of field initializations.
+//
+// Example:
+//
+// pkg.TypeName{field: value}
+// - typeName: pkg.TypeName
+// - fields: [{field: value}]
+func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr {
+ return opt.fac.NewStruct(opt.nextID(), typeName, fields)
+}
+
+// NewStructField creates a struct field initialization.
+//
+// Examples:
+//
+// {count: 3u}
+// - field: count
+// - value: 3u
+// - optional: false
+//
+// {?count: x}
+// - field: count
+// - value: x
+// - optional: true
+func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr {
+ return opt.fac.NewStructField(opt.nextID(), field, value, isOptional)
+}
+
+// UpdateExpr updates the target expression with the updated content while preserving macro metadata.
+//
+// There are four scenarios during the update to consider:
+// 1. target is not macro, updated is not macro
+// 2. target is macro, updated is not macro
+// 3. target is macro, updated is macro
+// 4. target is not macro, updated is macro
+//
+// When the target is a macro already, it may either be updated to a new macro function
+// body if the update is also a macro, or it may be removed altogether if the update is
+// not a macro.
+//
+// When the update is a macro, then the target references within other macros must be
+// updated to point to the new updated macro. Otherwise, other macros which pointed to
+// the target body must be replaced with copies of the updated expression body.
+func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) {
+ // Update the expression
+ target.SetKindCase(updated)
+
+ // Early return if there are no macros present, as the source info reflects
+ // the macro set from the target and updated expressions.
+ if len(opt.sourceInfo.MacroCalls()) == 0 {
+ return
+ }
+ // Determine whether the target expression was a macro.
+ _, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID())
+
+ // Determine whether the updated expression was a macro.
+ updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID())
+
+ if updatedIsMacro {
+ // If the updated call was a macro, then updated id maps to target id,
+ // and the updated macro moves into the target id slot.
+ opt.sourceInfo.ClearMacroCall(updated.ID())
+ opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro)
+ } else if targetIsMacro {
+ // Otherwise if the target expr was a macro, but is no longer, clear
+ // the macro reference.
+ opt.sourceInfo.ClearMacroCall(target.ID())
+ }
+
+ // Punch holes in the updated value where macro references exist.
+ macroExpr := opt.fac.CopyExpr(target)
+ macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists {
+ e.SetKindCase(nil)
+ }
+ })
+ ast.PostOrderVisit(macroExpr, macroRefVisitor)
+
+ // Update any references to the expression within a macro
+ macroVisitor := ast.NewExprVisitor(func(call ast.Expr) {
+ // Update the target expression to point to the macro expression which
+ // will be empty if the updated expression was a macro.
+ if call.ID() == target.ID() {
+ call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+ }
+ // Update the macro call expression if it refers to the updated expression
+ // id which has since been remapped to the target id.
+ if call.ID() == updated.ID() {
+ // Either ensure the expression is a macro reference or populate it with
+ // the relevant sub-expression if the updated expr was not a macro.
+ if updatedIsMacro {
+ call.SetKindCase(nil)
+ } else {
+ call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+ }
+ // Since SetKindCase does not renumber the id, ensure the references to
+ // the old 'updated' id are mapped to the target id.
+ call.RenumberIDs(func(id int64) int64 {
+ if id == updated.ID() {
+ return target.ID()
+ }
+ return id
+ })
+ }
+ })
+ for _, call := range opt.sourceInfo.MacroCalls() {
+ ast.PostOrderVisit(call, macroVisitor)
+ }
+}
+
+func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) {
+ macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID {
+ e.SetKindCase(nil)
+ }
+ })
+ ast.PostOrderVisit(macroExpr, macroRefVisitor)
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/options.go b/hack/tools/vendor/github.com/google/cel-go/cel/options.go
new file mode 100644
index 0000000000..69c6942634
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/options.go
@@ -0,0 +1,667 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/dynamicpb"
+
+ "github.com/google/cel-go/checker"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ descpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// These constants beginning with "Feature" enable optional behavior in
+// the library. See the documentation for each constant to see its
+// effects, compatibility restrictions, and standard conformance.
+const (
+ _ = iota
+
+ // Enable the tracking of function call expressions replaced by macros.
+ featureEnableMacroCallTracking
+
+ // Enable the use of cross-type numeric comparisons at the type-checker.
+ featureCrossTypeNumericComparisons
+
+ // Enable eager validation of declarations to ensure that Env values created
+ // with `Extend` inherit a validated list of declarations from the parent Env.
+ featureEagerlyValidateDeclarations
+
+ // Enable the use of the default UTC timezone when a timezone is not specified
+ // on a CEL timestamp operation. This fixes the scenario where the input time
+ // is not already in UTC.
+ featureDefaultUTCTimeZone
+
+ // Enable the serialization of logical operator ASTs as variadic calls, thus
+ // compressing the logic graph to a single call when multiple like-operator
+ // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d])
+ featureVariadicLogicalASTs
+
+ // Enable error generation when a presence test or optional field selection is
+ // performed on a primitive type.
+ featureEnableErrorOnBadPresenceTest
+)
+
+// EnvOption is a functional interface for configuring the environment.
+type EnvOption func(e *Env) (*Env, error)
+
+// ClearMacros options clears all parser macros.
+//
+// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
+// comprehensions such as `all` and `exists` are enabled only via macros.
+func ClearMacros() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.macros = NoMacros
+ return e, nil
+ }
+}
+
+// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one.
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeAdapter(adapter types.Adapter) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.adapter = adapter
+ return e, nil
+ }
+}
+
+// CustomTypeProvider replaces the types.Provider implementation with a custom one.
+//
+// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated)
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeProvider(provider any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ var err error
+ e.provider, err = maybeInteropProvider(provider)
+ return e, err
+ }
+}
+
+// Declarations option extends the declaration set configured in the environment.
+//
+// Note: Declarations will by default be appended to the pre-existing declaration set configured
+// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
+// purely custom set of declarations use NewCustomEnv.
+func Declarations(decls ...*exprpb.Decl) EnvOption {
+ declOpts := []EnvOption{}
+ var err error
+ var opt EnvOption
+ // Convert the declarations to `EnvOption` values ahead of time.
+ // Surface any errors in conversion when the options are applied.
+ for _, d := range decls {
+ opt, err = ExprDeclToDeclaration(d)
+ if err != nil {
+ break
+ }
+ declOpts = append(declOpts, opt)
+ }
+ return func(e *Env) (*Env, error) {
+ if err != nil {
+ return nil, err
+ }
+ for _, o := range declOpts {
+ e, err = o(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return e, nil
+ }
+}
+
+// EagerlyValidateDeclarations ensures that any collisions between configured declarations are caught
+// at the time of the `NewEnv` call.
+//
+// Eagerly validating declarations is also useful for bootstrapping a base `cel.Env` value.
+// Calls to base `Env.Extend()` will be significantly faster when declarations are eagerly validated
+// as declarations will be collision-checked at most once and only incrementally by way of `Extend`
+//
+// Disabled by default as not all environments are used for type-checking.
+func EagerlyValidateDeclarations(enabled bool) EnvOption {
+ return features(featureEagerlyValidateDeclarations, enabled)
+}
+
+// HomogeneousAggregateLiterals disables mixed type list and map literal values.
+//
+// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
+// expression, as well as via conversion of well-known dynamic types, or with unchecked
+// expressions.
+func HomogeneousAggregateLiterals() EnvOption {
+ return ASTValidators(ValidateHomogeneousAggregateLiterals())
+}
+
+// variadicLogicalOperatorASTs flattens like-operator chained logical expressions into a single
+// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as
+// it will reduce the number of recursive calls needed to deserialize the AST later.
+//
+// For example, given the following expression the call graph will be rendered accordingly:
+//
+// expression: a && b && c && (d || e)
+// ast: call(_&&_, [a, b, c, call(_||_, [d, e])])
+func variadicLogicalOperatorASTs() EnvOption {
+ return features(featureVariadicLogicalASTs, true)
+}
+
+// Macros option extends the macro set configured in the environment.
+//
+// Note: This option must be specified after ClearMacros if used together.
+func Macros(macros ...Macro) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.macros = append(e.macros, macros...)
+ return e, nil
+ }
+}
+
+// Container sets the container for resolving variable names. Defaults to an empty container.
+//
+// If all references within an expression are relative to a protocol buffer package, then
+// specifying a container of `google.type` would make it possible to write expressions such as
+// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
+func Container(name string) EnvOption {
+ return func(e *Env) (*Env, error) {
+ cont, err := e.Container.Extend(containers.Name(name))
+ if err != nil {
+ return nil, err
+ }
+ e.Container = cont
+ return e, nil
+ }
+}
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+//   to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+//   qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) EnvOption {
+ return func(e *Env) (*Env, error) {
+ cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...))
+ if err != nil {
+ return nil, err
+ }
+ e.Container = cont
+ return e, nil
+ }
+}
+
+// customTypeRegistry is an internal-only interface containing the minimum methods required to support
+// custom types. It is a subset of methods from ref.TypeRegistry.
+type customTypeRegistry interface {
+ RegisterDescriptor(protoreflect.FileDescriptor) error
+ RegisterType(...ref.Type) error
+}
+
+// Types adds one or more type declarations to the environment, allowing for construction of
+// type-literals whose definitions are included in the common expression built-in set.
+//
+// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type
+// provided to this option will result in an error.
+//
+// Well-known protobuf types within the `google.protobuf.*` package are included in the standard
+// environment by default.
+//
+// Note: This option must be specified after the CustomTypeProvider option when used together.
+func Types(addTypes ...any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ reg, isReg := e.provider.(customTypeRegistry)
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ for _, t := range addTypes {
+ switch v := t.(type) {
+ case proto.Message:
+ fdMap := pb.CollectFileDescriptorSet(v)
+ for _, fd := range fdMap {
+ err := reg.RegisterDescriptor(fd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ case ref.Type:
+ err := reg.RegisterType(v)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unsupported type: %T", t)
+ }
+ }
+ return e, nil
+ }
+}
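+
+// Illustrative sketch, not part of the upstream source: registering a
+// generated protobuf message type so object literals may be constructed in
+// expressions; myproto.MyMessage is an assumed placeholder type.
+//
+// env, err := NewEnv(
+// 	Types(&myproto.MyMessage{}),
+// 	Variable("msg", ObjectType("myproto.MyMessage")),
+// )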
+
+// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files,
+// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided.
+//
+// Note that messages instantiated from these descriptors will be *dynamicpb.Message values
+// rather than the concrete message type.
+//
+// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
+// extension or by re-using the same EnvOption with another NewEnv() call.
+func TypeDescs(descs ...any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ reg, isReg := e.provider.(customTypeRegistry)
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ // Scan the input descriptors for FileDescriptorProto messages and accumulate them into a
+ // synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other
+ // and will not resolve properly unless they are part of the same set.
+ var fds *descpb.FileDescriptorSet
+ for _, d := range descs {
+ switch f := d.(type) {
+ case *descpb.FileDescriptorProto:
+ if fds == nil {
+ fds = &descpb.FileDescriptorSet{
+ File: []*descpb.FileDescriptorProto{},
+ }
+ }
+ fds.File = append(fds.File, f)
+ }
+ }
+ if fds != nil {
+ if err := registerFileSet(reg, fds); err != nil {
+ return nil, err
+ }
+ }
+ for _, d := range descs {
+ switch f := d.(type) {
+ case *protoregistry.Files:
+ if err := registerFiles(reg, f); err != nil {
+ return nil, err
+ }
+ case protoreflect.FileDescriptor:
+ if err := reg.RegisterDescriptor(f); err != nil {
+ return nil, err
+ }
+ case *descpb.FileDescriptorSet:
+ if err := registerFileSet(reg, f); err != nil {
+ return nil, err
+ }
+ case *descpb.FileDescriptorProto:
+ // skip, handled as a synthetic file descriptor set.
+ default:
+ return nil, fmt.Errorf("unsupported type descriptor: %T", d)
+ }
+ }
+ return e, nil
+ }
+}
+
+func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error {
+ files, err := protodesc.NewFiles(fileSet)
+ if err != nil {
+ return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
+ }
+ return registerFiles(reg, files)
+}
+
+func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error {
+ var err error
+ files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+ err = reg.RegisterDescriptor(fd)
+ return err == nil
+ })
+ return err
+}
+
+// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
+type ProgramOption func(p *prog) (*prog, error)
+
+// CustomDecorator appends an InterpretableDecorator to the program.
+//
+// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
+func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ p.decorators = append(p.decorators, dec)
+ return p, nil
+ }
+}
+
+// Functions adds function overloads that extend or override the set of CEL built-ins.
+//
+// Deprecated: use Function() instead to declare the function, its overload signatures,
+// and the overload implementations.
+func Functions(funcs ...*functions.Overload) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ if err := p.dispatcher.Add(funcs...); err != nil {
+ return nil, err
+ }
+ return p, nil
+ }
+}
+
+// Globals sets the global variable values for a given program. These values may be shadowed by
+// variables with the same name provided to the Eval() call. If Globals is used in a Library with
+// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
+//
+// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
+func Globals(vars any) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ defaultVars, err := interpreter.NewActivation(vars)
+ if err != nil {
+ return nil, err
+ }
+ if p.defaultVars != nil {
+ defaultVars = interpreter.NewHierarchicalActivation(p.defaultVars, defaultVars)
+ }
+ p.defaultVars = defaultVars
+ return p, nil
+ }
+}
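+
+// Illustrative sketch, not part of the upstream source: default bindings
+// supplied at program construction and shadowed per-call by Eval input; the
+// variable names are assumptions.
+//
+// prg, err := env.Program(checkedAst, Globals(map[string]any{"env_name": "prod"}))
+// out, _, err := prg.Eval(map[string]any{"request": req})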
+
+// OptimizeRegex provides a way to replace the InterpretableCall for regex functions. This can be used
+// to compile regex string constants at program creation time, report any errors, and then use the
+// compiled regex for all regex function invocations.
+func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ p.regexOptimizations = append(p.regexOptimizations, regexOptimizations...)
+ return p, nil
+ }
+}
+
+// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
+// in the output result.
+type EvalOption int
+
+const (
+ // OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
+ OptTrackState EvalOption = 1 << iota
+
+ // OptExhaustiveEval causes the runtime to disable short-circuits and track state.
+ OptExhaustiveEval EvalOption = 1<<iota | OptTrackState
+
+ // Enable interrupt checking if there's a non-zero check frequency.
+ if p.interruptCheckFrequency > 0 {
+ decorators = append(decorators, interpreter.InterruptableEval())
+ }
+ // Enable constant folding first.
+ if p.evalOpts&OptOptimize == OptOptimize {
+ decorators = append(decorators, interpreter.Optimize())
+ p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
+ }
+ // Enable regex compilation of constants immediately after folding constants.
+ if len(p.regexOptimizations) > 0 {
+ decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
+ }
+
+ // Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
+ factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
+ costTracker.Estimator = p.callCostEstimator
+ costTracker.Limit = p.costLimit
+ for _, costOpt := range p.costOptions {
+ err := costOpt(costTracker)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
+ // prevents the underlying memory from being shared between factory function calls causing
+ // undesired mutations.
+ decs := decorators[:len(decorators):len(decorators)]
+ var observers []interpreter.EvalObserver
+
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
+ // EvalStateObserver is required for OptExhaustiveEval.
+ observers = append(observers, interpreter.EvalStateObserver(state))
+ }
+ if p.evalOpts&OptTrackCost == OptTrackCost {
+ observers = append(observers, interpreter.CostObserver(costTracker))
+ }
+
+ // Enable exhaustive eval over a basic observer since it offers a superset of features.
+ if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
+ decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
+ } else if len(observers) > 0 {
+ decs = append(decs, interpreter.Observe(observers...))
+ }
+
+ return p.clone().initInterpretable(a, decs)
+ }
+ return newProgGen(factory)
+ }
+ return p.initInterpretable(a, decorators)
+}
+
+func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) {
+ // When the AST has been type-checked it contains metadata that can be used to speed up program execution.
+ interpretable, err := p.interpreter.NewInterpretable(a, decs...)
+ if err != nil {
+ return nil, err
+ }
+ p.interpretable = interpretable
+ return p, nil
+}
+
+// Eval implements the Program interface method.
+func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
+ // Configure error recovery for unexpected panics during evaluation. Note, the use of named
+ // return values makes it possible to modify the error response during the recovery
+ // function.
+ defer func() {
+ if r := recover(); r != nil {
+ switch t := r.(type) {
+ case interpreter.EvalCancelledError:
+ err = t
+ default:
+ err = fmt.Errorf("internal error: %v", r)
+ }
+ }
+ }()
+ // Build a hierarchical activation if there are default vars set.
+ var vars interpreter.Activation
+ switch v := input.(type) {
+ case interpreter.Activation:
+ vars = v
+ case map[string]any:
+ vars = activationPool.Setup(v)
+ defer activationPool.Put(vars)
+ default:
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+ }
+ if p.defaultVars != nil {
+ vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
+ }
+ v = p.interpretable.Eval(vars)
+ // The output of an internal Eval may have a value (`v`) that is a types.Err. This step
+ // translates the CEL value to a Go error response. This interface does not quite match the
+ // RPC signature which allows for multiple errors to be returned, but should be sufficient.
+ if types.IsError(v) {
+ err = v.(*types.Err)
+ }
+ return
+}
+
+// ContextEval implements the Program interface.
+func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+ if ctx == nil {
+ return nil, nil, fmt.Errorf("context can not be nil")
+ }
+ // Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
+ // exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
+ var vars interpreter.Activation
+ switch v := input.(type) {
+ case interpreter.Activation:
+ vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
+ defer ctxActivationPool.Put(vars)
+ case map[string]any:
+ rawVars := activationPool.Setup(v)
+ defer activationPool.Put(rawVars)
+ vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
+ defer ctxActivationPool.Put(vars)
+ default:
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+ }
+ return p.Eval(vars)
+}
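+
+// A brief sketch of context-based cancellation (names illustrative; `prg` is a
+// Program built with the InterruptCheckFrequency option so the deadline is
+// observed during evaluation):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+//    defer cancel()
+//    out, _, err := prg.ContextEval(ctx, map[string]any{"input": 1})
+//
+// If the deadline fires mid-evaluation, err reports the cancellation.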
+
+// progFactory is a helper alias for marking a program creation factory function.
+type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
+
+// progGen holds a reference to a progFactory instance and implements the Program interface.
+type progGen struct {
+ factory progFactory
+}
+
+// newProgGen tests the factory object by calling it once and returns a factory-based Program if
+// the test is successful.
+func newProgGen(factory progFactory) (Program, error) {
+ // Test the factory to make sure that configuration errors are spotted at config time.
+ tracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, err
+ }
+ _, err = factory(interpreter.NewEvalState(), tracker)
+ if err != nil {
+ return nil, err
+ }
+ return &progGen{factory: factory}, nil
+}
+
+// Eval implements the Program interface method.
+func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
+ // The factory based Eval() differs from the standard evaluation model in that it generates a
+ // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+ // results.
+ state := interpreter.NewEvalState()
+ costTracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ det := &EvalDetails{state: state, costTracker: costTracker}
+
+ // Generate a new instance of the interpretable using the factory configured during the call to
+ // newProgram(). It is incredibly unlikely that the factory call will generate an error given
+ // the factory test performed within the Program() call.
+ p, err := gen.factory(state, costTracker)
+ if err != nil {
+ return nil, det, err
+ }
+
+ // Evaluate the input, returning the result and the 'state' within EvalDetails.
+ v, _, err := p.Eval(input)
+ if err != nil {
+ return v, det, err
+ }
+ return v, det, nil
+}
+
+// ContextEval implements the Program interface method.
+func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+ if ctx == nil {
+ return nil, nil, fmt.Errorf("context can not be nil")
+ }
+ // The factory based Eval() differs from the standard evaluation model in that it generates a
+ // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+ // results.
+ state := interpreter.NewEvalState()
+ costTracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ det := &EvalDetails{state: state, costTracker: costTracker}
+
+ // Generate a new instance of the interpretable using the factory configured during the call to
+ // newProgram(). It is incredibly unlikely that the factory call will generate an error given
+ // the factory test performed within the Program() call.
+ p, err := gen.factory(state, costTracker)
+ if err != nil {
+ return nil, det, err
+ }
+
+ // Evaluate the input, returning the result and the 'state' within EvalDetails.
+ v, _, err := p.ContextEval(ctx, input)
+ if err != nil {
+ return v, det, err
+ }
+ return v, det, nil
+}
+
+type ctxEvalActivation struct {
+ parent interpreter.Activation
+ interrupt <-chan struct{}
+ interruptCheckCount uint
+ interruptCheckFrequency uint
+}
+
+// ResolveName implements the Activation interface method, but adds a special #interrupted variable
+// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
+func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
+ if name == "#interrupted" {
+ a.interruptCheckCount++
+ if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
+ select {
+ case <-a.interrupt:
+ return true, true
+ default:
+ return nil, false
+ }
+ }
+ return nil, false
+ }
+ return a.parent.ResolveName(name)
+}
+
+func (a *ctxEvalActivation) Parent() interpreter.Activation {
+ return a.parent
+}
+
+func newCtxEvalActivationPool() *ctxEvalActivationPool {
+ return &ctxEvalActivationPool{
+ Pool: sync.Pool{
+ New: func() any {
+ return &ctxEvalActivation{}
+ },
+ },
+ }
+}
+
+type ctxEvalActivationPool struct {
+ sync.Pool
+}
+
+// Setup initializes a pooled Activation with the ability to check for context.Context cancellation.
+func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
+ a := p.Pool.Get().(*ctxEvalActivation)
+ a.parent = vars
+ a.interrupt = done
+ a.interruptCheckCount = 0
+ a.interruptCheckFrequency = interruptCheckRate
+ return a
+}
+
+type evalActivation struct {
+ vars map[string]any
+ lazyVars map[string]any
+}
+
+// ResolveName looks up the value of the input variable name, if found.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+// - func() any
+// - func() ref.Val
+//
+// The lazy binding will only be invoked once per evaluation.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
+func (a *evalActivation) ResolveName(name string) (any, bool) {
+ v, found := a.vars[name]
+ if !found {
+ return nil, false
+ }
+ switch obj := v.(type) {
+ case func() ref.Val:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ case func() any:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ default:
+ return obj, true
+ }
+}
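+
+// A short sketch of the lazy-binding forms described above (names illustrative):
+// each function value is invoked at most once per evaluation and memoized in
+// lazyVars.
+//
+//    vars := map[string]any{
+//        "now":  func() ref.Val { return types.DefaultTypeAdapter.NativeToValue(time.Now()) },
+//        "tags": func() any { return []string{"a", "b"} },
+//    }
+//    out, _, err := prg.Eval(vars)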
+
+// Parent implements the interpreter.Activation interface
+func (a *evalActivation) Parent() interpreter.Activation {
+ return nil
+}
+
+func newEvalActivationPool() *evalActivationPool {
+ return &evalActivationPool{
+ Pool: sync.Pool{
+ New: func() any {
+ return &evalActivation{lazyVars: make(map[string]any)}
+ },
+ },
+ }
+}
+
+type evalActivationPool struct {
+ sync.Pool
+}
+
+// Setup initializes a pooled Activation object with the map input.
+func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
+ a := p.Pool.Get().(*evalActivation)
+ a.vars = vars
+ return a
+}
+
+func (p *evalActivationPool) Put(value any) {
+ a := value.(*evalActivation)
+ for k := range a.lazyVars {
+ delete(a.lazyVars, k)
+ }
+ p.Pool.Put(a)
+}
+
+var (
+ // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
+ activationPool = newEvalActivationPool()
+
+ // ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
+ ctxActivationPool = newCtxEvalActivationPool()
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/validator.go b/hack/tools/vendor/github.com/google/cel-go/cel/validator.go
new file mode 100644
index 0000000000..b50c674520
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/validator.go
@@ -0,0 +1,375 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/overloads"
+)
+
+const (
+ homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
+
+ // HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
+ // the set of function names which are exempt from homogeneous type checks. The expected type
+ // is a string list of function names.
+ //
+ // As an example, the `.format([args])` call expects the input arguments list to be
+ // comprised of a variety of types which correspond to the types expected by the format control
+ // clauses; however, all other uses of a mixed element type list would be unexpected.
+ HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
+)
+
+// ASTValidators configures a set of ASTValidator instances into the target environment.
+//
+// Validators are applied in the order in which they are specified and are treated as singletons.
+// The same ASTValidator with a given name will not be applied more than once.
+func ASTValidators(validators ...ASTValidator) EnvOption {
+ return func(e *Env) (*Env, error) {
+ for _, v := range validators {
+ if !e.HasValidator(v.Name()) {
+ e.validators = append(e.validators, v)
+ }
+ }
+ return e, nil
+ }
+}
+
+// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment.
+//
+// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be
+// reported to the caller.
+type ASTValidator interface {
+ // Name returns the name of the validator. Names must be unique.
+ Name() string
+
+ // Validate validates a given Ast within an Environment and collects a set of potential issues.
+ //
+ // The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to
+ // the invocation of the Validate call. The expectation is that the validator configuration
+ // is created in sequence and immutable once provided to the Validate call.
+ //
+ // See individual validators for more information on their configuration keys and configuration
+ // properties.
+ Validate(*Env, ValidatorConfig, *ast.AST, *Issues)
+}
+
+// ValidatorConfig provides an accessor method for querying validator configuration state.
+type ValidatorConfig interface {
+ GetOrDefault(name string, value any) any
+}
+
+// MutableValidatorConfig provides mutation methods for querying and updating validator configuration
+// settings.
+type MutableValidatorConfig interface {
+ ValidatorConfig
+ Set(name string, value any) error
+}
+
+// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator,
+// participates in validator configuration settings.
+//
+// This interface may be split from the expectation of being an ASTValidator instance in the future.
+type ASTValidatorConfigurer interface {
+ Configure(MutableValidatorConfig) error
+}
+
+// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces.
+type validatorConfig struct {
+ data map[string]any
+}
+
+// newValidatorConfig initializes the validator config with default values for core CEL validators.
+func newValidatorConfig() *validatorConfig {
+ return &validatorConfig{
+ data: map[string]any{
+ HomogeneousAggregateLiteralExemptFunctions: []string{},
+ },
+ }
+}
+
+// GetOrDefault returns the configured value for the name, if present, else the input default value.
+//
+// Note, the type-agreement between the input default and configured value is not checked on read.
+func (config *validatorConfig) GetOrDefault(name string, value any) any {
+ v, found := config.data[name]
+ if !found {
+ return value
+ }
+ return v
+}
+
+// Set configures a validator option with the given name and value.
+//
+// If the value had previously been set, the new value must have the same reflection type as the old one,
+// or the call will error.
+func (config *validatorConfig) Set(name string, value any) error {
+ v, found := config.data[name]
+ if found && reflect.TypeOf(v) != reflect.TypeOf(value) {
+ return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v)
+ }
+ config.data[name] = value
+ return nil
+}
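+
+// For example (a sketch of the check above): overwriting a []string setting with
+// a value of a different type is rejected.
+//
+//    cfg := newValidatorConfig()
+//    _ = cfg.Set(HomogeneousAggregateLiteralExemptFunctions, []string{"format"}) // ok
+//    err := cfg.Set(HomogeneousAggregateLiteralExemptFunctions, 42)              // type mismatch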
+
+// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors.
+//
+// - Validate duration and timestamp literals
+// - Ensure regex strings are valid
+// - Disable mixed type list and map literals
+func ExtendedValidations() EnvOption {
+ return ASTValidators(
+ ValidateDurationLiterals(),
+ ValidateTimestampLiterals(),
+ ValidateRegexLiterals(),
+ ValidateHomogeneousAggregateLiterals(),
+ )
+}
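+
+// A minimal usage sketch (expression and declarations illustrative): the
+// validators run during Compile/Check, so issues surface before a Program is
+// ever constructed.
+//
+//    env, _ := NewEnv(Variable("input", StringType), ExtendedValidations())
+//    _, iss := env.Compile(`input.matches('(unbalanced')`)
+//    // iss reports the invalid regex literal at compile time.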
+
+// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check.
+func ValidateDurationLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall)
+}
+
+// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check.
+func ValidateTimestampLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall)
+}
+
+// ValidateRegexLiterals ensures that regex patterns are validated after type-check.
+func ValidateRegexLiterals() ASTValidator {
+ return newFormatValidator(overloads.Matches, 0, compileRegex)
+}
+
+// ValidateHomogeneousAggregateLiterals checks that all list and map literal entries have the same type, i.e.
+// no mixed list element types or mixed map key or map value types.
+//
+// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all
+// literals which occur within string format calls.
+func ValidateHomogeneousAggregateLiterals() ASTValidator {
+ return homogeneousAggregateLiteralValidator{}
+}
+
+// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit.
+//
+// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial
+// time to complete.
+//
+// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have
+// no actual looping cost. The cel.bind() macro utilizes the comprehension structure to perform local variable
+// assignments and supplies an empty iteration range, so such bindings won't count against the nesting limit either.
+func ValidateComprehensionNestingLimit(limit int) ASTValidator {
+ return nestingLimitValidator{limit: limit}
+}
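+
+// For example (illustrative): with a limit of 1, a comprehension nested within
+// another comprehension's predicate is rejected at check time.
+//
+//    env, _ := NewEnv(
+//        Variable("xs", ListType(ListType(IntType))),
+//        ASTValidators(ValidateComprehensionNestingLimit(1)),
+//    )
+//    _, iss := env.Compile(`xs.all(x, x.all(y, y > 0))`)
+//    // iss reports that the comprehension exceeds the nesting limit.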
+
+type argChecker func(env *Env, call, arg ast.Expr) error
+
+func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
+ return formatValidator{
+ funcName: funcName,
+ check: check,
+ argNum: argNum,
+ }
+}
+
+type formatValidator struct {
+ funcName string
+ argNum int
+ check argChecker
+}
+
+// Name returns the unique name of this function format validator.
+func (v formatValidator) Name() string {
+ return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
+}
+
+// Validate searches the AST for uses of a given function name with a constant argument and performs a check
+// on whether the argument is a valid literal value.
+func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
+ root := ast.NavigateAST(a)
+ funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName))
+ for _, call := range funcCalls {
+ callArgs := call.AsCall().Args()
+ if len(callArgs) <= v.argNum {
+ continue
+ }
+ litArg := callArgs[v.argNum]
+ if litArg.Kind() != ast.LiteralKind {
+ continue
+ }
+ if err := v.check(e, call, litArg); err != nil {
+ iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName)
+ }
+ }
+}
+
+func evalCall(env *Env, call, arg ast.Expr) error {
+ ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))}
+ prg, err := env.Program(ast)
+ if err != nil {
+ return err
+ }
+ _, _, err = prg.Eval(NoVars())
+ return err
+}
+
+func compileRegex(_ *Env, _, arg ast.Expr) error {
+ pattern := arg.AsLiteral().Value().(string)
+ _, err := regexp.Compile(pattern)
+ return err
+}
+
+type homogeneousAggregateLiteralValidator struct{}
+
+// Name returns the unique name of the homogeneous type validator.
+func (homogeneousAggregateLiteralValidator) Name() string {
+ return homogeneousValidatorName
+}
+
+// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
+//
+// This validator makes an exception for list and map literals which occur at any level of nesting within
+// string format calls.
+func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) {
+ var exemptedFunctions []string
+ exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string)
+ root := ast.NavigateAST(a)
+ listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind))
+ for _, listExpr := range listExprs {
+ if inExemptFunction(listExpr, exemptedFunctions) {
+ continue
+ }
+ l := listExpr.AsList()
+ elements := l.Elements()
+ optIndices := l.OptionalIndices()
+ var elemType *Type
+ for i, e := range elements {
+ et := a.GetType(e.ID())
+ if isOptionalIndex(i, optIndices) {
+ et = et.Parameters()[0]
+ }
+ if elemType == nil {
+ elemType = et
+ continue
+ }
+ if !elemType.IsEquivalentType(et) {
+ v.typeMismatch(iss, e.ID(), elemType, et)
+ break
+ }
+ }
+ }
+ mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind))
+ for _, mapExpr := range mapExprs {
+ if inExemptFunction(mapExpr, exemptedFunctions) {
+ continue
+ }
+ m := mapExpr.AsMap()
+ entries := m.Entries()
+ var keyType, valType *Type
+ for _, e := range entries {
+ mapEntry := e.AsMapEntry()
+ key, val := mapEntry.Key(), mapEntry.Value()
+ kt, vt := a.GetType(key.ID()), a.GetType(val.ID())
+ if mapEntry.IsOptional() {
+ vt = vt.Parameters()[0]
+ }
+ if keyType == nil && valType == nil {
+ keyType, valType = kt, vt
+ continue
+ }
+ if !keyType.IsEquivalentType(kt) {
+ v.typeMismatch(iss, key.ID(), keyType, kt)
+ }
+ if !valType.IsEquivalentType(vt) {
+ v.typeMismatch(iss, val.ID(), valType, vt)
+ }
+ }
+ }
+}
+
+func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
+ parent, found := e.Parent()
+ for found {
+ if parent.Kind() == ast.CallKind {
+ fnName := parent.AsCall().FunctionName()
+ for _, exempt := range exemptFunctions {
+ if exempt == fnName {
+ return true
+ }
+ }
+ }
+ parent, found = parent.Parent()
+ }
+ return false
+}
+
+func isOptionalIndex(i int, optIndices []int32) bool {
+ for _, optInd := range optIndices {
+ if i == int(optInd) {
+ return true
+ }
+ }
+ return false
+}
+
+func (homogeneousAggregateLiteralValidator) typeMismatch(iss *Issues, id int64, expected, actual *Type) {
+ iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual))
+}
+
+type nestingLimitValidator struct {
+ limit int
+}
+
+func (v nestingLimitValidator) Name() string {
+ return "cel.lib.std.validate.comprehension_nesting_limit"
+}
+
+func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
+ root := ast.NavigateAST(a)
+ comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
+ if len(comprehensions) <= v.limit {
+ return
+ }
+ for _, comp := range comprehensions {
+ count := 0
+ e := comp
+ hasParent := true
+ for hasParent {
+ // When the expression is not a comprehension, continue to the next ancestor.
+ if e.Kind() != ast.ComprehensionKind {
+ e, hasParent = e.Parent()
+ continue
+ }
+ // When the comprehension has an empty range, continue to the next ancestor
+ // as this comprehension does not have any associated cost.
+ iterRange := e.AsComprehension().IterRange()
+ if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 {
+ e, hasParent = e.Parent()
+ continue
+ }
+ // Otherwise check the nesting limit.
+ count++
+ if count > v.limit {
+ iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit")
+ break
+ }
+ e, hasParent = e.Parent()
+ }
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/checker/BUILD.bazel
new file mode 100644
index 0000000000..678b412a95
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/BUILD.bazel
@@ -0,0 +1,64 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "checker.go",
+ "cost.go",
+ "env.go",
+ "errors.go",
+ "format.go",
+ "mapping.go",
+ "options.go",
+ "printer.go",
+ "scopes.go",
+ "types.go",
+ ],
+ importpath = "github.com/google/cel-go/checker",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/debug:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//parser:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "checker_test.go",
+ "cost_test.go",
+ "env_test.go",
+ "format_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/types:go_default_library",
+ "//parser:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/checker.go b/hack/tools/vendor/github.com/google/cel-go/checker/checker.go
new file mode 100644
index 0000000000..0603cfa302
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/checker.go
@@ -0,0 +1,711 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package checker defines functions to type-check a parsed expression
+// against a set of identifier and function declarations.
+package checker
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type checker struct {
+ *ast.AST
+ ast.ExprFactory
+ env *Env
+ errors *typeErrors
+ mappings *mapping
+ freeTypeVarCounter int
+}
+
+// Check performs type checking, giving a typed AST.
+//
+// The input is a parsed AST and an env which encapsulates type binding of variables,
+// declarations of built-in functions, descriptions of protocol buffers, and a registry for
+// errors.
+//
+// Returns a type-checked AST, which might not be usable if there are errors in the error
+// registry.
+func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) {
+ errs := common.NewErrors(source)
+ typeMap := make(map[int64]*types.Type)
+ refMap := make(map[int64]*ast.ReferenceInfo)
+ c := checker{
+ AST: ast.NewCheckedAST(parsed, typeMap, refMap),
+ ExprFactory: ast.NewExprFactory(),
+ env: env,
+ errors: &typeErrors{errs: errs},
+ mappings: newMapping(),
+ freeTypeVarCounter: 0,
+ }
+ c.check(c.Expr())
+
+ // Walk over the final type map substituting any type parameters either by their bound value
+ // or by DYN.
+ for id, t := range c.TypeMap() {
+ c.SetType(id, substitute(c.mappings, t, true))
+ }
+ return c.AST, errs
+}
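+
+// Callers typically reach Check indirectly through the cel package rather than
+// invoking it directly (a sketch; names illustrative):
+//
+//    env, _ := cel.NewEnv(cel.Variable("x", cel.IntType))
+//    parsedAst, _ := env.Parse(`x + 1`)
+//    checkedAst, iss := env.Check(parsedAst)
+//    // checkedAst carries the type and reference maps populated above; iss
+//    // aggregates any errors recorded in the error registry.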
+
+func (c *checker) check(e ast.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.Kind() {
+ case ast.LiteralKind:
+ literal := ref.Val(e.AsLiteral())
+ switch literal.Type() {
+ case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+ types.NullType, types.StringType, types.UintType:
+ c.setType(e, literal.Type().(*types.Type))
+ default:
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName())
+ }
+ case ast.IdentKind:
+ c.checkIdent(e)
+ case ast.SelectKind:
+ c.checkSelect(e)
+ case ast.CallKind:
+ c.checkCall(e)
+ case ast.ListKind:
+ c.checkCreateList(e)
+ case ast.MapKind:
+ c.checkCreateMap(e)
+ case ast.StructKind:
+ c.checkCreateStruct(e)
+ case ast.ComprehensionKind:
+ c.checkComprehension(e)
+ default:
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name())
+ }
+}
+
+func (c *checker) checkIdent(e ast.Expr) {
+ identName := e.AsIdent()
+ // Check to see if the identifier is declared.
+ if ident := c.env.LookupIdent(identName); ident != nil {
+ c.setType(e, ident.Type())
+ c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+ // Overwrite the identifier with its fully qualified name.
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
+ return
+ }
+
+ c.setType(e, types.ErrorType)
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName)
+}
+
+func (c *checker) checkSelect(e ast.Expr) {
+ sel := e.AsSelect()
+ // Before traversing down the tree, try to interpret as qualified name.
+ qname, found := containers.ToQualifiedName(e)
+ if found {
+ ident := c.env.LookupIdent(qname)
+ if ident != nil {
+ // We don't check for a TestOnly expression here since the `found` result is
+ // always going to be false for TestOnly expressions.
+
+ // Rewrite the node to be a variable reference to the resolved fully-qualified
+ // variable name.
+ c.setType(e, ident.Type())
+ c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
+ return
+ }
+ }
+
+ resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false)
+ if sel.IsTestOnly() {
+ resultType = types.BoolType
+ }
+ c.setType(e, substitute(c.mappings, resultType, false))
+}
+
+func (c *checker) checkOptSelect(e ast.Expr) {
+ // Collect metadata related to the opt select call packaged by the parser.
+ call := e.AsCall()
+ operand := call.Args()[0]
+ field := call.Args()[1]
+ fieldName, isString := maybeUnwrapString(field)
+ if !isString {
+ c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field)
+ return
+ }
+
+ // Perform type-checking using the field selection logic.
+ resultType := c.checkSelectField(e, operand, fieldName, true)
+ c.setType(e, substitute(c.mappings, resultType, false))
+ c.setReference(e, ast.NewFunctionReference("select_optional_field"))
+}
+
+func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type {
+ // Interpret as field selection, first traversing down the operand.
+ c.check(operand)
+ operandType := substitute(c.mappings, c.getType(operand), false)
+
+ // If the target type is 'optional', unwrap it for the sake of this check.
+ targetType, isOpt := maybeUnwrapOptional(operandType)
+
+ // Assume error type by default as most types do not support field selection.
+ resultType := types.ErrorType
+ switch targetType.Kind() {
+ case types.MapKind:
+ // Maps yield their value type as the selection result type.
+ resultType = targetType.Parameters()[1]
+ case types.StructKind:
+ // Objects yield their field type declaration as the selection result type, but only if
+ // the field is defined.
+ messageType := targetType
+ if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found {
+ resultType = fieldType
+ }
+ case types.TypeParamKind:
+ // Set the operand type to DYN to prevent assignment to a potentially incorrect type
+ // at a later point in type-checking. The isAssignable call will update the type
+ // substitutions for the type param under the covers.
+ c.isAssignable(types.DynType, targetType)
+ // Also, set the result type to DYN.
+ resultType = types.DynType
+ default:
+ // Dynamic / error values are treated as DYN type. Errors are handled this way as well
+ // in order to allow forward progress on the check.
+ if !isDynOrError(targetType) {
+ c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType)
+ }
+ resultType = types.DynType
+ }
+
+ // If the target type was optional coming in, then the result must be optional going out.
+ if isOpt || optional {
+ return types.NewOptionalType(resultType)
+ }
+ return resultType
+}
+
+func (c *checker) checkCall(e ast.Expr) {
+ // Note: similar logic exists within the `interpreter/planner.go`. If making changes here
+ // please consider the impact on planner.go and consolidate implementations or mirror code
+ // as appropriate.
+ call := e.AsCall()
+ fnName := call.FunctionName()
+ if fnName == operators.OptSelect {
+ c.checkOptSelect(e)
+ return
+ }
+
+ args := call.Args()
+ // Traverse arguments.
+ for _, arg := range args {
+ c.check(arg)
+ }
+
+ // Regular static call with simple name.
+ if !call.IsMemberFunction() {
+ // Check for the existence of the function.
+ fn := c.env.LookupFunction(fnName)
+ if fn == nil {
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Overwrite the function name with its fully qualified resolved name.
+ e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
+ // Check to see whether the overload resolves.
+ c.resolveOverloadOrError(e, fn, nil, args)
+ return
+ }
+
+ // If a receiver 'target' is present, it may either be a receiver function, or a namespaced
+ // function, but not both. Given a.b.c() either a.b.c is a function or c is a function with
+ // target a.b.
+ //
+ // Check whether the target is a namespaced function name.
+ target := call.Target()
+ qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target)
+ if maybeQualified {
+ maybeQualifiedName := qualifiedPrefix + "." + fnName
+ fn := c.env.LookupFunction(maybeQualifiedName)
+ if fn != nil {
+ // The function name is namespaced and so preserving the target operand would
+ // be an inaccurate representation of the desired evaluation behavior.
+ // Overwrite with fully-qualified resolved function name sans receiver target.
+ e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
+ c.resolveOverloadOrError(e, fn, nil, args)
+ return
+ }
+ }
+
+ // Regular instance call.
+ c.check(target)
+ fn := c.env.LookupFunction(fnName)
+ // Function found, attempt overload resolution.
+ if fn != nil {
+ c.resolveOverloadOrError(e, fn, target, args)
+ return
+ }
+ // Function name not declared, record error.
+ c.setType(e, types.ErrorType)
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
+}
+
+func (c *checker) resolveOverloadOrError(
+ e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) {
+ // Attempt to resolve the overload.
+ resolution := c.resolveOverload(e, fn, target, args)
+ // No such overload, error noted in the resolveOverload call, type recorded here.
+ if resolution == nil {
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Overload found.
+ c.setType(e, resolution.Type)
+ c.setReference(e, resolution.Reference)
+}
+
+func (c *checker) resolveOverload(
+ call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution {
+
+ var argTypes []*types.Type
+ if target != nil {
+ argTypes = append(argTypes, c.getType(target))
+ }
+ for _, arg := range args {
+ argTypes = append(argTypes, c.getType(arg))
+ }
+
+ var resultType *types.Type
+ var checkedRef *ast.ReferenceInfo
+ for _, overload := range fn.OverloadDecls() {
+ // Determine whether the overload is currently considered.
+ if c.env.isOverloadDisabled(overload.ID()) {
+ continue
+ }
+
+ // Ensure the call style for the overload matches.
+ if (target == nil && overload.IsMemberFunction()) ||
+ (target != nil && !overload.IsMemberFunction()) {
+ // not a compatible call style.
+ continue
+ }
+
+ // Alternative type-checking behavior when the logical operators are compacted into
+ // variadic AST representations.
+ if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr {
+ checkedRef = ast.NewFunctionReference(overload.ID())
+ for i, argType := range argTypes {
+ if !c.isAssignable(argType, types.BoolType) {
+ c.errors.typeMismatch(
+ args[i].ID(),
+ c.locationByID(args[i].ID()),
+ types.BoolType,
+ argType)
+ resultType = types.ErrorType
+ }
+ }
+ if isError(resultType) {
+ return nil
+ }
+ return newResolution(checkedRef, types.BoolType)
+ }
+
+ overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...)
+ typeParams := overload.TypeParams()
+ if len(typeParams) != 0 {
+ // Instantiate overload's type with fresh type variables.
+ substitutions := newMapping()
+ for _, typePar := range typeParams {
+ substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar())
+ }
+ overloadType = substitute(substitutions, overloadType, false)
+ }
+
+ candidateArgTypes := overloadType.Parameters()[1:]
+ if c.isAssignableList(argTypes, candidateArgTypes) {
+ if checkedRef == nil {
+ checkedRef = ast.NewFunctionReference(overload.ID())
+ } else {
+ checkedRef.AddOverload(overload.ID())
+ }
+
+ // First matching overload, determines result type.
+ fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false)
+ if resultType == nil {
+ resultType = fnResultType
+ } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) {
+ resultType = types.DynType
+ }
+ }
+ }
+
+ if resultType == nil {
+ for i, argType := range argTypes {
+ argTypes[i] = substitute(c.mappings, argType, true)
+ }
+ c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil)
+ return nil
+ }
+
+ return newResolution(checkedRef, resultType)
+}
+
+func (c *checker) checkCreateList(e ast.Expr) {
+ create := e.AsList()
+ var elemsType *types.Type
+ optionalIndices := create.OptionalIndices()
+ optionals := make(map[int32]bool, len(optionalIndices))
+ for _, optInd := range optionalIndices {
+ optionals[optInd] = true
+ }
+ for i, e := range create.Elements() {
+ c.check(e)
+ elemType := c.getType(e)
+ if optionals[int32(i)] {
+ var isOptional bool
+ elemType, isOptional = maybeUnwrapOptional(elemType)
+ if !isOptional && !isDyn(elemType) {
+ c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType)
+ }
+ }
+ elemsType = c.joinTypes(e, elemsType, elemType)
+ }
+ if elemsType == nil {
+ // If the list is empty, assign free type var to elem type.
+ elemsType = c.newTypeVar()
+ }
+ c.setType(e, types.NewListType(elemsType))
+}
+
+func (c *checker) checkCreateMap(e ast.Expr) {
+ mapVal := e.AsMap()
+ var mapKeyType *types.Type
+ var mapValueType *types.Type
+ for _, e := range mapVal.Entries() {
+ entry := e.AsMapEntry()
+ key := entry.Key()
+ c.check(key)
+ mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
+
+ val := entry.Value()
+ c.check(val)
+ valType := c.getType(val)
+ if entry.IsOptional() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType)
+ }
+ }
+ mapValueType = c.joinTypes(val, mapValueType, valType)
+ }
+ if mapKeyType == nil {
+ // If the map is empty, assign free type variables to typeKey and value type.
+ mapKeyType = c.newTypeVar()
+ mapValueType = c.newTypeVar()
+ }
+ c.setType(e, types.NewMapType(mapKeyType, mapValueType))
+}
+
+func (c *checker) checkCreateStruct(e ast.Expr) {
+ msgVal := e.AsStruct()
+ // Determine the type of the message.
+ resultType := types.ErrorType
+ ident := c.env.LookupIdent(msgVal.TypeName())
+ if ident == nil {
+ c.errors.undeclaredReference(
+ e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName())
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Ensure the type name is fully qualified in the AST.
+ typeName := ident.Name()
+ if msgVal.TypeName() != typeName {
+ e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields()))
+ msgVal = e.AsStruct()
+ }
+ c.setReference(e, ast.NewIdentReference(typeName, nil))
+ identKind := ident.Type().Kind()
+ if identKind != types.ErrorKind {
+ if identKind != types.TypeKind {
+ c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName())
+ } else {
+ resultType = ident.Type().Parameters()[0]
+ // Backwards compatibility test between well-known types and message types
+ // In this context, the type is being instantiated by its protobuf name which
+ // is not ideal or recommended, but some users expect this to work.
+ if isWellKnownType(resultType) {
+ typeName = getWellKnownTypeName(resultType)
+ } else if resultType.Kind() == types.StructKind {
+ typeName = resultType.DeclaredTypeName()
+ } else {
+ c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName())
+ resultType = types.ErrorType
+ }
+ }
+ }
+ c.setType(e, resultType)
+
+ // Check the field initializers.
+ for _, f := range msgVal.Fields() {
+ field := f.AsStructField()
+ fieldName := field.Name()
+ value := field.Value()
+ c.check(value)
+
+ fieldType := types.ErrorType
+ ft, found := c.lookupFieldType(f.ID(), typeName, fieldName)
+ if found {
+ fieldType = ft
+ }
+
+ valType := c.getType(value)
+ if field.IsOptional() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType)
+ }
+ }
+ if !c.isAssignable(fieldType, valType) {
+ c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType)
+ }
+ }
+}
+
+func (c *checker) checkComprehension(e ast.Expr) {
+ comp := e.AsComprehension()
+ c.check(comp.IterRange())
+ c.check(comp.AccuInit())
+ rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false)
+
+ // Create a scope for the comprehension since it has a local accumulation variable.
+ // This scope will contain the accumulation variable used to compute the result.
+ accuType := c.getType(comp.AccuInit())
+ c.env = c.env.enterScope()
+ c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType))
+
+ var varType, var2Type *types.Type
+ switch rangeType.Kind() {
+ case types.ListKind:
+ // varType represents the list element type for one-variable comprehensions.
+ varType = rangeType.Parameters()[0]
+ if comp.HasIterVar2() {
+ // varType represents the list index (int) for two-variable comprehensions,
+ // and var2Type represents the list element type.
+ var2Type = varType
+ varType = types.IntType
+ }
+ case types.MapKind:
+ // varType represents the map entry key for all comprehension types.
+ varType = rangeType.Parameters()[0]
+ if comp.HasIterVar2() {
+ // var2Type represents the map entry value for two-variable comprehensions.
+ var2Type = rangeType.Parameters()[1]
+ }
+ case types.DynKind, types.ErrorKind, types.TypeParamKind:
+ // Set the range type to DYN to prevent assignment to a potentially incorrect type
+ // at a later point in type-checking. The isAssignable call will update the type
+ // substitutions for the type param under the covers.
+ c.isAssignable(types.DynType, rangeType)
+ // Set the range iteration variable to type DYN as well.
+ varType = types.DynType
+ default:
+ c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType)
+ varType = types.ErrorType
+ }
+
+ // Create a block scope for the loop.
+ c.env = c.env.enterScope()
+ c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType))
+ if comp.HasIterVar2() {
+ c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type))
+ }
+ // Check the variable references in the condition and step.
+ c.check(comp.LoopCondition())
+ c.assertType(comp.LoopCondition(), types.BoolType)
+ c.check(comp.LoopStep())
+ c.assertType(comp.LoopStep(), accuType)
+ // Exit the loop's block scope before checking the result.
+ c.env = c.env.exitScope()
+ c.check(comp.Result())
+ // Exit the comprehension scope.
+ c.env = c.env.exitScope()
+ c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false))
+}
+
+// Checks compatibility of joined types, and returns the most general common type.
+func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type {
+ if previous == nil {
+ return current
+ }
+ if c.isAssignable(previous, current) {
+ return mostGeneral(previous, current)
+ }
+ if c.dynAggregateLiteralElementTypesEnabled() {
+ return types.DynType
+ }
+ c.errors.typeMismatch(e.ID(), c.location(e), previous, current)
+ return types.ErrorType
+}
+
+func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
+ return c.env.aggLitElemType == dynElementType
+}
+
+func (c *checker) newTypeVar() *types.Type {
+ id := c.freeTypeVarCounter
+ c.freeTypeVarCounter++
+ return types.NewTypeParamType(fmt.Sprintf("_var%d", id))
+}
+
+func (c *checker) isAssignable(t1, t2 *types.Type) bool {
+ subs := isAssignable(c.mappings, t1, t2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func (c *checker) isAssignableList(l1, l2 []*types.Type) bool {
+ subs := isAssignableList(c.mappings, l1, l2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func maybeUnwrapString(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.LiteralKind:
+ literal := e.AsLiteral()
+ switch v := literal.(type) {
+ case types.String:
+ return string(v), true
+ }
+ }
+ return "", false
+}
+
+func (c *checker) setType(e ast.Expr, t *types.Type) {
+ if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) {
+ c.errors.incompatibleType(e.ID(), c.location(e), e, old, t)
+ return
+ }
+ c.SetType(e.ID(), t)
+}
+
+func (c *checker) getType(e ast.Expr) *types.Type {
+ return c.TypeMap()[e.ID()]
+}
+
+func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) {
+ if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) {
+ c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r)
+ return
+ }
+ c.SetReference(e.ID(), r)
+}
+
+func (c *checker) assertType(e ast.Expr, t *types.Type) {
+ if !c.isAssignable(t, c.getType(e)) {
+ c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e))
+ }
+}
+
+type overloadResolution struct {
+ Type *types.Type
+ Reference *ast.ReferenceInfo
+}
+
+func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution {
+ return &overloadResolution{
+ Reference: r,
+ Type: t,
+ }
+}
+
+func (c *checker) location(e ast.Expr) common.Location {
+ return c.locationByID(e.ID())
+}
+
+func (c *checker) locationByID(id int64) common.Location {
+ return c.SourceInfo().GetStartLocation(id)
+}
+
+func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) {
+ if _, found := c.env.provider.FindStructType(structType); !found {
+ // This should not happen; report an error anyway.
+ c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType)
+ return nil, false
+ }
+
+ if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found {
+ return ft.Type, found
+ }
+
+ c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName)
+ return nil, false
+}
+
+func isWellKnownType(t *types.Type) bool {
+ switch t.Kind() {
+ case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind:
+ return true
+ case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind:
+ return t.IsAssignableType(types.NullType)
+ case types.ListKind:
+ return t.Parameters()[0] == types.DynType
+ case types.MapKind:
+ return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType
+ }
+ return false
+}
+
+func getWellKnownTypeName(t *types.Type) string {
+ if name, found := wellKnownTypes[t.Kind()]; found {
+ return name
+ }
+ return ""
+}
+
+var (
+ wellKnownTypes = map[types.Kind]string{
+ types.AnyKind: "google.protobuf.Any",
+ types.BoolKind: "google.protobuf.BoolValue",
+ types.BytesKind: "google.protobuf.BytesValue",
+ types.DoubleKind: "google.protobuf.DoubleValue",
+ types.DurationKind: "google.protobuf.Duration",
+ types.DynKind: "google.protobuf.Value",
+ types.IntKind: "google.protobuf.Int64Value",
+ types.ListKind: "google.protobuf.ListValue",
+ types.NullTypeKind: "google.protobuf.NullValue",
+ types.MapKind: "google.protobuf.Struct",
+ types.StringKind: "google.protobuf.StringValue",
+ types.TimestampKind: "google.protobuf.Timestamp",
+ types.UintKind: "google.protobuf.UInt64Value",
+ }
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/cost.go b/hack/tools/vendor/github.com/google/cel-go/checker/cost.go
new file mode 100644
index 0000000000..04244694d8
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/cost.go
@@ -0,0 +1,705 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+)
+
+// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go
+
+// CostEstimator estimates the sizes of variable length input data and the costs of functions.
+type CostEstimator interface {
+ // EstimateSize returns a SizeEstimate for the given AstNode, or nil if
+ // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function:
+ // length of strings and bytes, number of map entries or number of list items.
+ // EstimateSize is only called for AstNodes where
+ // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size
+ // is already obvious to CEL.
+ EstimateSize(element AstNode) *SizeEstimate
+ // EstimateCallCost returns the estimated cost of an invocation, or nil if
+ // the estimator has no estimate to provide.
+ EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate
+}
+
+// CallEstimate includes a CostEstimate for the call, and an optional estimate of the result object size.
+// The ResultSize should only be provided if the call results in a map, list, string or bytes.
+type CallEstimate struct {
+ CostEstimate
+ ResultSize *SizeEstimate
+}
+
+// AstNode represents an AST node for the purpose of cost estimations.
+type AstNode interface {
+ // Path returns a field path through the provided type declarations to the type of the AstNode, or nil if the AstNode does not
+ // represent a type directly reachable from the provided type declarations.
+ // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
+ Path() []string
+ // Type returns the deduced type of the AstNode.
+ Type() *types.Type
+ // Expr returns the expression of the AstNode.
+ Expr() ast.Expr
+ // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
+ // For constants and inline list and map declarations, the exact size is returned. For concatenated lists, strings
+ // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
+ // computed size available.
+ ComputedSize() *SizeEstimate
+}
+
+type astNode struct {
+ path []string
+ t *types.Type
+ expr ast.Expr
+ derivedSize *SizeEstimate
+}
+
+func (e astNode) Path() []string {
+ return e.path
+}
+
+func (e astNode) Type() *types.Type {
+ return e.t
+}
+
+func (e astNode) Expr() ast.Expr {
+ return e.expr
+}
+
+func (e astNode) ComputedSize() *SizeEstimate {
+ if e.derivedSize != nil {
+ return e.derivedSize
+ }
+ var v uint64
+ switch e.expr.Kind() {
+ case ast.LiteralKind:
+ switch ck := e.expr.AsLiteral().(type) {
+ case types.String:
+ // converting to runes here is an O(n) operation, but
+ // this is consistent with how size is computed at runtime,
+ // and how the language definition defines string size
+ v = uint64(len([]rune(ck)))
+ case types.Bytes:
+ v = uint64(len(ck))
+ case types.Bool, types.Double, types.Duration,
+ types.Int, types.Timestamp, types.Uint,
+ types.Null:
+ v = uint64(1)
+ default:
+ return nil
+ }
+ case ast.ListKind:
+ v = uint64(e.expr.AsList().Size())
+ case ast.MapKind:
+ v = uint64(e.expr.AsMap().Size())
+ default:
+ return nil
+ }
+
+ return &SizeEstimate{Min: v, Max: v}
+}
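+
+// Worked example: for the string literal "héllo" the computed size is the rune
+// count, so Min = Max = 5; for an inline list [1, 2, 3] it is the element count, 3.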
+
+// SizeEstimate represents an estimated size of a variable length string, bytes, map or list.
+type SizeEstimate struct {
+ Min, Max uint64
+}
+
+// Add adds to another SizeEstimate and returns the sum.
+// If add would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
+ return SizeEstimate{
+ addUint64NoOverflow(se.Min, sizeEstimate.Min),
+ addUint64NoOverflow(se.Max, sizeEstimate.Max),
+ }
+}
+
+// Multiply multiplies by another SizeEstimate and returns the product.
+// If multiply would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) Multiply(sizeEstimate SizeEstimate) SizeEstimate {
+ return SizeEstimate{
+ multiplyUint64NoOverflow(se.Min, sizeEstimate.Min),
+ multiplyUint64NoOverflow(se.Max, sizeEstimate.Max),
+ }
+}
+
+// MultiplyByCostFactor multiplies a SizeEstimate by a cost factor and returns the CostEstimate with the
+// nearest integer of the result, rounded up.
+func (se SizeEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+ return CostEstimate{
+ multiplyByCostFactor(se.Min, costPerUnit),
+ multiplyByCostFactor(se.Max, costPerUnit),
+ }
+}
+
+// MultiplyByCost multiplies by the cost and returns the product.
+// If multiply would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) MultiplyByCost(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ multiplyUint64NoOverflow(se.Min, cost.Min),
+ multiplyUint64NoOverflow(se.Max, cost.Max),
+ }
+}
+
+// Union returns a SizeEstimate that encompasses both input SizeEstimates.
+func (se SizeEstimate) Union(size SizeEstimate) SizeEstimate {
+ result := se
+ if size.Min < result.Min {
+ result.Min = size.Min
+ }
+ if size.Max > result.Max {
+ result.Max = size.Max
+ }
+ return result
+}
+
+// CostEstimate represents an estimated cost range and provides add and multiply operations
+// that do not overflow.
+type CostEstimate struct {
+ Min, Max uint64
+}
+
+// Add adds the costs and returns the sum.
+// If add would result in a uint64 overflow for the min or max, the value is set to math.MaxUint64.
+func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ addUint64NoOverflow(ce.Min, cost.Min),
+ addUint64NoOverflow(ce.Max, cost.Max),
+ }
+}
+
+// Multiply multiplies by the cost and returns the product.
+// If multiply would result in a uint64 overflow, the result is math.MaxUint64.
+func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ multiplyUint64NoOverflow(ce.Min, cost.Min),
+ multiplyUint64NoOverflow(ce.Max, cost.Max),
+ }
+}
+
+// MultiplyByCostFactor multiplies a CostEstimate by a cost factor and returns the CostEstimate with the
+// nearest integer of the result, rounded up.
+func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+ return CostEstimate{
+ multiplyByCostFactor(ce.Min, costPerUnit),
+ multiplyByCostFactor(ce.Max, costPerUnit),
+ }
+}
+
+// Union returns a CostEstimate that encompasses both input CostEstimates.
+func (ce CostEstimate) Union(size CostEstimate) CostEstimate {
+ result := ce
+ if size.Min < result.Min {
+ result.Min = size.Min
+ }
+ if size.Max > result.Max {
+ result.Max = size.Max
+ }
+ return result
+}
+
+// addUint64NoOverflow adds two uint64 values. If the result would exceed math.MaxUint64,
+// math.MaxUint64 is returned.
+func addUint64NoOverflow(x, y uint64) uint64 {
+ if y > 0 && x > math.MaxUint64-y {
+ return math.MaxUint64
+ }
+ return x + y
+}
+
+// multiplyUint64NoOverflow multiplies two uint64 values. If the result would exceed math.MaxUint64,
+// math.MaxUint64 is returned.
+func multiplyUint64NoOverflow(x, y uint64) uint64 {
+ if y != 0 && x > math.MaxUint64/y {
+ return math.MaxUint64
+ }
+ return x * y
+}
+
+// multiplyByCostFactor multiplies an integer by a cost-factor float and returns the result rounded up to the nearest integer.
+func multiplyByCostFactor(x uint64, y float64) uint64 {
+ xFloat := float64(x)
+ if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
+ return math.MaxUint64
+ }
+ ceil := math.Ceil(xFloat * y)
+ if ceil >= doubleTwoTo64 {
+ return math.MaxUint64
+ }
+ return uint64(ceil)
+}
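+
+// As an illustrative sketch of the saturating behavior of the helpers above
+// (the input values are hypothetical):
+//
+//	addUint64NoOverflow(math.MaxUint64, 1)      // math.MaxUint64, rather than wrapping to 0
+//	multiplyUint64NoOverflow(math.MaxUint64, 2) // math.MaxUint64
+//	multiplyByCostFactor(10, 0.25)              // 3, i.e. ceil(2.5)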
+
+var (
+ selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
+ constCost = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}
+
+ createListBaseCost = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
+ createMapBaseCost = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
+ createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
+)
+
+type coster struct {
+ // exprPath maps from Expr Id to field path.
+ exprPath map[int64][]string
+ // iterRanges tracks the iterRange of each iterVar.
+ iterRanges iterRangeScopes
+ // computedSizes tracks the computed sizes of call results.
+ computedSizes map[int64]SizeEstimate
+ checkedAST *ast.AST
+ estimator CostEstimator
+ overloadEstimators map[string]FunctionEstimator
+ // presenceTestCost will be either zero or one depending on whether has() macros count against cost computations.
+ presenceTestCost CostEstimate
+}
+
+// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
+type iterRangeScopes map[string][]int64
+
+func (vs iterRangeScopes) push(varName string, expr ast.Expr) {
+ vs[varName] = append(vs[varName], expr.ID())
+}
+
+func (vs iterRangeScopes) pop(varName string) {
+ varStack := vs[varName]
+ vs[varName] = varStack[:len(varStack)-1]
+}
+
+func (vs iterRangeScopes) peek(varName string) (int64, bool) {
+ varStack := vs[varName]
+ if len(varStack) > 0 {
+ return varStack[len(varStack)-1], true
+ }
+ return 0, false
+}
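+
+// A hedged sketch of how shadowed comprehension variables resolve; the
+// expressions and IDs below are hypothetical:
+//
+//	vs := iterRangeScopes{}
+//	vs.push("x", outerRange) // outerRange.ID() == 1
+//	vs.push("x", innerRange) // innerRange.ID() == 2
+//	id, _ := vs.peek("x")    // id == 2: the innermost range shadows the outer one
+//	vs.pop("x")
+//	id, _ = vs.peek("x")     // id == 1 again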
+
+// CostOption configures flags which affect cost computations.
+type CostOption func(*coster) error
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+//
+// By default, a presence test has a cost of one.
+func PresenceTestHasCost(hasCost bool) CostOption {
+ return func(c *coster) error {
+ if hasCost {
+ c.presenceTestCost = selectAndIdentCost
+ return nil
+ }
+ c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
+ return nil
+ }
+}
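+
+// Example usage (a sketch; `checkedAST` and `estimator` are assumed to be in scope):
+//
+//	est, err := Cost(checkedAST, estimator, PresenceTestHasCost(false))
+//	// has(x.y) macros now contribute zero cost instead of one.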
+
+// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function and overload pair.
+type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate
+
+// OverloadCostEstimate binds a FunctionEstimator to a specific function overload ID.
+//
+// When an OverloadCostEstimate is provided, it overrides the cost calculation of the CostEstimator provided to
+// the Cost() call.
+func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption {
+ return func(c *coster) error {
+ c.overloadEstimators[overloadID] = functionCoster
+ return nil
+ }
+}
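+
+// Example usage (a sketch; the overload ID and flat cost are hypothetical):
+//
+//	opt := OverloadCostEstimate("my_fn_string",
+//		func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate {
+//			// Charge a flat cost of 10 regardless of the argument sizes.
+//			return &CallEstimate{CostEstimate: CostEstimate{Min: 10, Max: 10}}
+//		})
+//	// Pass opt to Cost() alongside any other CostOptions.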
+
+// Cost estimates the cost of the parsed and type checked CEL expression.
+func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
+ c := &coster{
+ checkedAST: checked,
+ estimator: estimator,
+ overloadEstimators: map[string]FunctionEstimator{},
+ exprPath: map[int64][]string{},
+ iterRanges: map[string][]int64{},
+ computedSizes: map[int64]SizeEstimate{},
+ presenceTestCost: CostEstimate{Min: 1, Max: 1},
+ }
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return CostEstimate{}, err
+ }
+ }
+ return c.cost(checked.Expr()), nil
+}
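+
+// A minimal usage sketch, assuming a type-checked AST `checked` and a
+// CostEstimator implementation; `myEstimator` and `budget` are hypothetical:
+//
+//	est, err := Cost(checked, myEstimator)
+//	if err == nil && est.Max > budget {
+//		// Reject the expression as potentially too expensive to evaluate.
+//	}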
+
+func (c *coster) cost(e ast.Expr) CostEstimate {
+ if e == nil {
+ return CostEstimate{}
+ }
+ var cost CostEstimate
+ switch e.Kind() {
+ case ast.LiteralKind:
+ cost = constCost
+ case ast.IdentKind:
+ cost = c.costIdent(e)
+ case ast.SelectKind:
+ cost = c.costSelect(e)
+ case ast.CallKind:
+ cost = c.costCall(e)
+ case ast.ListKind:
+ cost = c.costCreateList(e)
+ case ast.MapKind:
+ cost = c.costCreateMap(e)
+ case ast.StructKind:
+ cost = c.costCreateStruct(e)
+ case ast.ComprehensionKind:
+ cost = c.costComprehension(e)
+ default:
+ return CostEstimate{}
+ }
+ return cost
+}
+
+func (c *coster) costIdent(e ast.Expr) CostEstimate {
+ identName := e.AsIdent()
+ // build and track the field path
+ if iterRange, ok := c.iterRanges.peek(identName); ok {
+ switch c.checkedAST.GetType(iterRange).Kind() {
+ case types.ListKind:
+ c.addPath(e, append(c.exprPath[iterRange], "@items"))
+ case types.MapKind:
+ c.addPath(e, append(c.exprPath[iterRange], "@keys"))
+ }
+ } else {
+ c.addPath(e, []string{identName})
+ }
+
+ return selectAndIdentCost
+}
+
+func (c *coster) costSelect(e ast.Expr) CostEstimate {
+ sel := e.AsSelect()
+ var sum CostEstimate
+ if sel.IsTestOnly() {
+ // Recurse, but do not add any cost for the qualifier itself.
+ // This is equivalent to how evalTestOnly increments the runtime cost
+ // counter without charging for the qualifier, except that here the
+ // relationship is reversed (the ident adds the cost).
+ sum = sum.Add(c.presenceTestCost)
+ sum = sum.Add(c.cost(sel.Operand()))
+ return sum
+ }
+ sum = sum.Add(c.cost(sel.Operand()))
+ targetType := c.getType(sel.Operand())
+ switch targetType.Kind() {
+ case types.MapKind, types.StructKind, types.TypeParamKind:
+ sum = sum.Add(selectAndIdentCost)
+ }
+
+ // build and track the field path
+ c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName()))
+
+ return sum
+}
+
+func (c *coster) costCall(e ast.Expr) CostEstimate {
+ call := e.AsCall()
+ args := call.Args()
+
+ var sum CostEstimate
+
+ argTypes := make([]AstNode, len(args))
+ argCosts := make([]CostEstimate, len(args))
+ for i, arg := range args {
+ argCosts[i] = c.cost(arg)
+ argTypes[i] = c.newAstNode(arg)
+ }
+
+ overloadIDs := c.checkedAST.GetOverloadIDs(e.ID())
+ if len(overloadIDs) == 0 {
+ return CostEstimate{}
+ }
+ var targetType AstNode
+ if call.IsMemberFunction() {
+ sum = sum.Add(c.cost(call.Target()))
+ targetType = c.newAstNode(call.Target())
+ }
+ // Pick a cost estimate range that covers all the overload cost estimation ranges
+ fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
+ var resultSize *SizeEstimate
+ for _, overload := range overloadIDs {
+ overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts)
+ fnCost = fnCost.Union(overloadCost.CostEstimate)
+ if overloadCost.ResultSize != nil {
+ if resultSize == nil {
+ resultSize = overloadCost.ResultSize
+ } else {
+ size := resultSize.Union(*overloadCost.ResultSize)
+ resultSize = &size
+ }
+ }
+ // build and track the field path for index operations
+ switch overload {
+ case overloads.IndexList:
+ if len(args) > 0 {
+ c.addPath(e, append(c.getPath(args[0]), "@items"))
+ }
+ case overloads.IndexMap:
+ if len(args) > 0 {
+ c.addPath(e, append(c.getPath(args[0]), "@values"))
+ }
+ }
+ }
+ if resultSize != nil {
+ c.computedSizes[e.ID()] = *resultSize
+ }
+ return sum.Add(fnCost)
+}
+
+func (c *coster) costCreateList(e ast.Expr) CostEstimate {
+ create := e.AsList()
+ var sum CostEstimate
+ for _, e := range create.Elements() {
+ sum = sum.Add(c.cost(e))
+ }
+ return sum.Add(createListBaseCost)
+}
+
+func (c *coster) costCreateMap(e ast.Expr) CostEstimate {
+ mapVal := e.AsMap()
+ var sum CostEstimate
+ for _, ent := range mapVal.Entries() {
+ entry := ent.AsMapEntry()
+ sum = sum.Add(c.cost(entry.Key()))
+ sum = sum.Add(c.cost(entry.Value()))
+ }
+ return sum.Add(createMapBaseCost)
+}
+
+func (c *coster) costCreateStruct(e ast.Expr) CostEstimate {
+ msgVal := e.AsStruct()
+ var sum CostEstimate
+ for _, ent := range msgVal.Fields() {
+ field := ent.AsStructField()
+ sum = sum.Add(c.cost(field.Value()))
+ }
+ return sum.Add(createMessageBaseCost)
+}
+
+func (c *coster) costComprehension(e ast.Expr) CostEstimate {
+ comp := e.AsComprehension()
+ var sum CostEstimate
+ sum = sum.Add(c.cost(comp.IterRange()))
+ sum = sum.Add(c.cost(comp.AccuInit()))
+
+ // Track the iterRange of each IterVar for field path construction
+ c.iterRanges.push(comp.IterVar(), comp.IterRange())
+ loopCost := c.cost(comp.LoopCondition())
+ stepCost := c.cost(comp.LoopStep())
+ c.iterRanges.pop(comp.IterVar())
+ sum = sum.Add(c.cost(comp.Result()))
+ rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange()))
+
+ c.computedSizes[e.ID()] = rangeCnt
+
+ rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
+ sum = sum.Add(rangeCost)
+
+ return sum
+}
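+
+// A hedged sketch of the estimate produced above: for a comprehension such as
+// list.all(x, p(x)), the cost is approximately
+//
+//	cost(iterRange) + cost(accuInit) + cost(result) +
+//		size(iterRange) * (cost(loopCondition) + cost(loopStep))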
+
+func (c *coster) sizeEstimate(t AstNode) SizeEstimate {
+ if l := t.ComputedSize(); l != nil {
+ return *l
+ }
+ if l := c.estimator.EstimateSize(t); l != nil {
+ return *l
+ }
+ // Return an estimate of 1 for return types with a fixed (set) size,
+ // since strings/bytes/more complex objects could be of variable length
+ // and cannot be bounded here.
+ if isScalar(t.Type()) {
+ // TODO: since the logic for size estimation is split between
+ // ComputedSize and isScalar, changing one will likely require changing
+ // the other, so they should be merged in the future if possible
+ return SizeEstimate{Min: 1, Max: 1}
+ }
+ return SizeEstimate{Min: 0, Max: math.MaxUint64}
+}
+
+func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
+ argCostSum := func() CostEstimate {
+ var sum CostEstimate
+ for _, a := range argCosts {
+ sum = sum.Add(a)
+ }
+ return sum
+ }
+ if len(c.overloadEstimators) != 0 {
+ if estimator, found := c.overloadEstimators[overloadID]; found {
+ if est := estimator(c.estimator, target, args); est != nil {
+ callEst := *est
+ return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+ }
+ }
+ }
+ if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
+ callEst := *est
+ return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+ }
+ switch overloadID {
+ // O(n) functions
+ case overloads.ExtFormatString:
+ if target != nil {
+ // ResultSize not calculated because we can't bound the max size.
+ return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ case overloads.StringToBytes:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize max is when each char converts to 4 bytes.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
+ }
+ case overloads.BytesToString:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize min is when 4 bytes convert to 1 char.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
+ }
+ case overloads.ExtQuoteString:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize max is when each char is escaped. 2 quote chars always added.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
+ }
+ case overloads.StartsWithString, overloads.EndsWithString:
+ if len(args) == 1 {
+ return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ case overloads.InList:
+ // If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+ // We just assume all list containment checks are O(n).
+ if len(args) == 2 {
+ return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
+ }
+ // O(nm) functions
+ case overloads.MatchesString:
+ // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
+ if target != nil && len(args) == 1 {
+ // Add one to the string length for purposes of cost calculation to prevent the product of the
+ // string and regex lengths from being 0 in the case where the string is empty but the regex is still expensive.
+ strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ // We don't know how many expressions are in the regex, just the string length (a huge
+ // improvement here would be to somehow get a count of the number of expressions in the regex or
+ // how many states are in the regex state machine and use that to measure regex cost).
+ // For now, we're making a guess that each expression in a regex is typically at least 4 chars
+ // in length.
+ regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
+ return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())}
+ }
+ case overloads.ContainsString:
+ if target != nil && len(args) == 1 {
+ strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())}
+ }
+ case overloads.LogicalOr, overloads.LogicalAnd:
+ lhs := argCosts[0]
+ rhs := argCosts[1]
+ // min cost is min of LHS for short circuited && or ||
+ argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max}
+ return CallEstimate{CostEstimate: argCost}
+ case overloads.Conditional:
+ size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2]))
+ conditionalCost := argCosts[0]
+ ifTrueCost := argCosts[1]
+ ifFalseCost := argCosts[2]
+ argCost := conditionalCost.Add(ifTrueCost.Union(ifFalseCost))
+ return CallEstimate{CostEstimate: argCost, ResultSize: &size}
+ case overloads.AddString, overloads.AddBytes, overloads.AddList:
+ if len(args) == 2 {
+ lhsSize := c.sizeEstimate(args[0])
+ rhsSize := c.sizeEstimate(args[1])
+ resultSize := lhsSize.Add(rhsSize)
+ switch overloadID {
+ case overloads.AddList:
+ // list concatenation is O(1), but we handle it here to track size
+ return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize}
+ default:
+ return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize}
+ }
+ }
+ case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
+ overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
+ overloads.Equals, overloads.NotEquals:
+ lhsCost := c.sizeEstimate(args[0])
+ rhsCost := c.sizeEstimate(args[1])
+ min := uint64(0)
+ smallestMax := lhsCost.Max
+ if rhsCost.Max < smallestMax {
+ smallestMax = rhsCost.Max
+ }
+ if smallestMax > 0 {
+ min = 1
+ }
+ // equality of 2 scalar values results in a cost of 1
+ return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ // O(1) functions
+ // See CostTracker.costCall for more details about O(1) cost calculations
+
+ // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit,
+ // which on an Intel Xeon 2.20GHz CPU is 50ns.
+ return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
+}
+
+func (c *coster) getType(e ast.Expr) *types.Type {
+ return c.checkedAST.GetType(e.ID())
+}
+
+func (c *coster) getPath(e ast.Expr) []string {
+ return c.exprPath[e.ID()]
+}
+
+func (c *coster) addPath(e ast.Expr, path []string) {
+ c.exprPath[e.ID()] = path
+}
+
+func (c *coster) newAstNode(e ast.Expr) *astNode {
+ path := c.getPath(e)
+ if len(path) > 0 && path[0] == parser.AccumulatorName {
+ // only provide paths to root vars; omit accumulator vars
+ path = nil
+ }
+ var derivedSize *SizeEstimate
+ if size, ok := c.computedSizes[e.ID()]; ok {
+ derivedSize = &size
+ }
+ return &astNode{
+ path: path,
+ t: c.getType(e),
+ expr: e,
+ derivedSize: derivedSize}
+}
+
+// isScalar returns true if the given type is known to be of a constant size at
+// compile time. isScalar will return false for strings (they are variable-width)
+// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time).
+func isScalar(t *types.Type) bool {
+ switch t.Kind() {
+ case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind:
+ return true
+ }
+ return false
+}
+
+var (
+ doubleTwoTo64 = math.Ldexp(1.0, 64)
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
new file mode 100644
index 0000000000..a6b0be292c
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "decls.go",
+ ],
+ importpath = "github.com/google/cel-go/checker/decls",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/decls/decls.go b/hack/tools/vendor/github.com/google/cel-go/checker/decls/decls.go
new file mode 100644
index 0000000000..c0e5de469f
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/decls/decls.go
@@ -0,0 +1,237 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package decls provides helpers for creating variable and function declarations.
+package decls
+
+import (
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ // Error type used to communicate issues during type-checking.
+ Error = &exprpb.Type{
+ TypeKind: &exprpb.Type_Error{
+ Error: &emptypb.Empty{}}}
+
+ // Dyn is a top-type used to represent any value.
+ Dyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_Dyn{
+ Dyn: &emptypb.Empty{}}}
+)
+
+// Commonly used types.
+var (
+ Bool = NewPrimitiveType(exprpb.Type_BOOL)
+ Bytes = NewPrimitiveType(exprpb.Type_BYTES)
+ Double = NewPrimitiveType(exprpb.Type_DOUBLE)
+ Int = NewPrimitiveType(exprpb.Type_INT64)
+ Null = &exprpb.Type{
+ TypeKind: &exprpb.Type_Null{
+ Null: structpb.NullValue_NULL_VALUE}}
+ String = NewPrimitiveType(exprpb.Type_STRING)
+ Uint = NewPrimitiveType(exprpb.Type_UINT64)
+)
+
+// Well-known types.
+// TODO: Replace with an abstract type registry.
+var (
+ Any = NewWellKnownType(exprpb.Type_ANY)
+ Duration = NewWellKnownType(exprpb.Type_DURATION)
+ Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP)
+)
+
+// NewAbstractType creates an abstract type declaration which references a proto
+// message name and may also include type parameters.
+func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_AbstractType_{
+ AbstractType: &exprpb.Type_AbstractType{
+ Name: name,
+ ParameterTypes: paramTypes}}}
+}
+
+// NewOptionalType constructs an abstract type indicating that the parameterized type
+// may be contained within the object.
+func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
+ return NewAbstractType("optional_type", paramType)
+}
+
+// NewFunctionType creates a function invocation contract, typically only used
+// by type-checking steps after overload resolution.
+func NewFunctionType(resultType *exprpb.Type,
+ argTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Function{
+ Function: &exprpb.Type_FunctionType{
+ ResultType: resultType,
+ ArgTypes: argTypes}}}
+}
+
+// NewFunction creates a named function declaration with one or more overloads.
+func NewFunction(name string,
+ overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Function{
+ Function: &exprpb.Decl_FunctionDecl{
+ Overloads: overloads}}}
+}
+
+// NewIdent creates a named identifier declaration with an optional literal
+// value.
+//
+// Literal values are typically only associated with enum identifiers.
+//
+// Deprecated: Use NewVar or NewConst instead.
+func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Ident{
+ Ident: &exprpb.Decl_IdentDecl{
+ Type: t,
+ Value: v}}}
+}
+
+// NewConst creates a constant identifier with a CEL constant literal value.
+func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return NewIdent(name, t, v)
+}
+
+// NewVar creates a variable identifier.
+func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
+ return NewIdent(name, t, nil)
+}
+
+// NewInstanceOverload creates an instance function overload contract.
+// The first element of argTypes is the instance (receiver) type.
+func NewInstanceOverload(id string, argTypes []*exprpb.Type,
+ resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ IsInstanceFunction: true}
+}
+
+// NewListType generates a new list with elements of a certain type.
+func NewListType(elem *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{
+ ElemType: elem}}}
+}
+
+// NewMapType generates a new map with typed keys and values.
+func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: key,
+ ValueType: value}}}
+}
+
+// NewObjectType creates an object type for a qualified type name.
+func NewObjectType(typeName string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: typeName}}
+}
+
+// NewOverload creates a function overload declaration which contains a unique
+// overload id as well as the expected argument and result types. Overloads
+// must be aggregated within a Function declaration.
+func NewOverload(id string, argTypes []*exprpb.Type,
+ resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ IsInstanceFunction: false}
+}
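+
+// Example usage of the declaration helpers (a sketch; the declarations shown
+// are hypothetical):
+//
+//	sizeFn := NewFunction("size",
+//		NewOverload("size_string", []*exprpb.Type{String}, Int))
+//	nameVar := NewVar("request.name", String)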
+
+// NewParameterizedInstanceOverload creates a parametric function instance overload type.
+func NewParameterizedInstanceOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: true}
+}
+
+// NewParameterizedOverload creates a parametric function overload type.
+func NewParameterizedOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: false}
+}
+
+// NewPrimitiveType creates a type for a primitive value. See the var declarations
+// for Int, Uint, etc.
+func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{
+ Primitive: primitive}}
+}
+
+// NewTypeType creates a new type designating a type.
+func NewTypeType(nested *exprpb.Type) *exprpb.Type {
+ if nested == nil {
+ // must set the nested field for a valid oneof option
+ nested = &exprpb.Type{}
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: nested}}
+}
+
+// NewTypeParamType creates a type corresponding to a named, contextual parameter.
+func NewTypeParamType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_TypeParam{
+ TypeParam: name}}
+}
+
+// NewWellKnownType creates a type corresponding to a protobuf well-known type
+// value.
+func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{
+ WellKnown: wellKnown}}
+}
+
+// NewWrapperType creates a wrapped primitive type instance. Wrapped types
+// are roughly equivalent to a nullable, or optionally valued type.
+func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type {
+ primitive := wrapped.GetPrimitive()
+ if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED {
+ // TODO: return an error
+ panic("Wrapped type must be a primitive")
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Wrapper{
+ Wrapper: primitive}}
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/env.go b/hack/tools/vendor/github.com/google/cel-go/checker/env.go
new file mode 100644
index 0000000000..d5ac05014e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/env.go
@@ -0,0 +1,284 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+)
+
+type aggregateLiteralElementType int
+
+const (
+ dynElementType aggregateLiteralElementType = iota
+ homogenousElementType aggregateLiteralElementType = 1 << iota
+)
+
+var (
+ crossTypeNumericComparisonOverloads = map[string]struct{}{
+ // double <-> int | uint
+ overloads.LessDoubleInt64: {},
+ overloads.LessDoubleUint64: {},
+ overloads.LessEqualsDoubleInt64: {},
+ overloads.LessEqualsDoubleUint64: {},
+ overloads.GreaterDoubleInt64: {},
+ overloads.GreaterDoubleUint64: {},
+ overloads.GreaterEqualsDoubleInt64: {},
+ overloads.GreaterEqualsDoubleUint64: {},
+ // int <-> double | uint
+ overloads.LessInt64Double: {},
+ overloads.LessInt64Uint64: {},
+ overloads.LessEqualsInt64Double: {},
+ overloads.LessEqualsInt64Uint64: {},
+ overloads.GreaterInt64Double: {},
+ overloads.GreaterInt64Uint64: {},
+ overloads.GreaterEqualsInt64Double: {},
+ overloads.GreaterEqualsInt64Uint64: {},
+ // uint <-> double | int
+ overloads.LessUint64Double: {},
+ overloads.LessUint64Int64: {},
+ overloads.LessEqualsUint64Double: {},
+ overloads.LessEqualsUint64Int64: {},
+ overloads.GreaterUint64Double: {},
+ overloads.GreaterUint64Int64: {},
+ overloads.GreaterEqualsUint64Double: {},
+ overloads.GreaterEqualsUint64Int64: {},
+ }
+)
+
+// Env is the environment for type checking.
+//
+// The Env is composed of a container, type provider, declarations, and other related objects
+// which can be used to assist with type-checking.
+type Env struct {
+ container *containers.Container
+ provider types.Provider
+ declarations *Scopes
+ aggLitElemType aggregateLiteralElementType
+ filteredOverloadIDs map[string]struct{}
+}
+
+// NewEnv returns a new *Env with the given parameters.
+func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) {
+ declarations := newScopes()
+ declarations.Push()
+
+ envOptions := &options{}
+ for _, opt := range opts {
+ if err := opt(envOptions); err != nil {
+ return nil, err
+ }
+ }
+ aggLitElemType := dynElementType
+ if envOptions.homogeneousAggregateLiterals {
+ aggLitElemType = homogenousElementType
+ }
+ filteredOverloadIDs := crossTypeNumericComparisonOverloads
+ if envOptions.crossTypeNumericComparisons {
+ filteredOverloadIDs = make(map[string]struct{})
+ }
+ if envOptions.validatedDeclarations != nil {
+ declarations = envOptions.validatedDeclarations.Copy()
+ }
+ return &Env{
+ container: container,
+ provider: provider,
+ declarations: declarations,
+ aggLitElemType: aggLitElemType,
+ filteredOverloadIDs: filteredOverloadIDs,
+ }, nil
+}
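+
+// Example construction (a sketch; it assumes the container and registry
+// helpers from the common/containers and common/types packages):
+//
+//	cont, _ := containers.NewContainer(containers.Name("mypkg"))
+//	reg, _ := types.NewRegistry()
+//	env, err := NewEnv(cont, reg, CrossTypeNumericComparisons(true))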
+
+// AddIdents configures the checker with a list of variable declarations.
+//
+// If there are overlapping declarations, the method will error.
+func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error {
+ errMsgs := make([]errorMsg, 0)
+ for _, d := range declarations {
+ errMsgs = append(errMsgs, e.addIdent(d))
+ }
+ return formatError(errMsgs)
+}
+
+// AddFunctions configures the checker with a list of function declarations.
+//
+// If there are overlapping declarations, the method will error.
+func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error {
+ errMsgs := make([]errorMsg, 0)
+ for _, d := range declarations {
+ errMsgs = append(errMsgs, e.setFunction(d)...)
+ }
+ return formatError(errMsgs)
+}
+
+// LookupIdent returns a VariableDecl for the given name if it is declared as an identifier in the Env.
+// Returns nil if no such identifier is found in the Env.
+func (e *Env) LookupIdent(name string) *decls.VariableDecl {
+ for _, candidate := range e.container.ResolveCandidateNames(name) {
+ if ident := e.declarations.FindIdent(candidate); ident != nil {
+ return ident
+ }
+
+ // Next try to import the name as a reference to a message type. If found,
+ // the declaration is added to the outermost (global) scope of the
+ // environment, so subsequent lookups are faster.
+ if t, found := e.provider.FindStructType(candidate); found {
+ decl := decls.NewVariable(candidate, t)
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+
+ if i, found := e.provider.FindIdent(candidate); found {
+ if t, ok := i.(*types.Type); ok {
+ decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t))
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+ }
+
+ // Next try to import this as an enum value by splitting the name into a type prefix and
+ // the enum value name.
+ if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
+ decl := decls.NewConstant(candidate, types.IntType, enumValue)
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+ }
+ return nil
+}
+
+// LookupFunction returns a FunctionDecl for the given name if it is declared as a function in the Env.
+// Returns nil if no such function is found in the Env.
+func (e *Env) LookupFunction(name string) *decls.FunctionDecl {
+ for _, candidate := range e.container.ResolveCandidateNames(name) {
+ if fn := e.declarations.FindFunction(candidate); fn != nil {
+ return fn
+ }
+ }
+ return nil
+}
+
+// setFunction adds the function Decl to the Env.
+// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
+// If an overload overlaps with an existing overload or macro, an error message is returned instead.
+func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg {
+ errMsgs := make([]errorMsg, 0)
+ current := e.declarations.FindFunction(fn.Name())
+ if current != nil {
+ var err error
+ current, err = current.Merge(fn)
+ if err != nil {
+ return append(errMsgs, errorMsg(err.Error()))
+ }
+ } else {
+ current = fn
+ }
+ for _, overload := range current.OverloadDecls() {
+ for _, macro := range parser.AllMacros {
+ if macro.Function() == current.Name() &&
+ macro.IsReceiverStyle() == overload.IsMemberFunction() &&
+ macro.ArgCount() == len(overload.ArgTypes()) {
+ errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount()))
+ }
+ }
+ if len(errMsgs) > 0 {
+ return errMsgs
+ }
+ }
+ e.declarations.SetFunction(current)
+ return errMsgs
+}
+
+// addIdent adds the Decl to the declarations in the Env.
+// Returns a non-empty errorMsg if the identifier is already declared in the scope.
+func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg {
+ current := e.declarations.FindIdentInScope(decl.Name())
+ if current != nil {
+ if current.DeclarationIsEquivalent(decl) {
+ return ""
+ }
+ return overlappingIdentifierError(decl.Name())
+ }
+ e.declarations.AddIdent(decl)
+ return ""
+}
+
+// isOverloadDisabled returns whether the overloadID is disabled in the current environment.
+func (e *Env) isOverloadDisabled(overloadID string) bool {
+ _, found := e.filteredOverloadIDs[overloadID]
+ return found
+}
+
+// validatedDeclarations returns a reference to the validated variable and function declaration scope stack.
+// It must be copied before use.
+func (e *Env) validatedDeclarations() *Scopes {
+ return e.declarations
+}
+
+// enterScope creates a new Env instance with a new innermost declaration scope.
+func (e *Env) enterScope() *Env {
+ childDecls := e.declarations.Push()
+ return &Env{
+ declarations: childDecls,
+ container: e.container,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ }
+}
+
+// exitScope creates a new Env instance with the nearest outer declaration scope.
+func (e *Env) exitScope() *Env {
+ parentDecls := e.declarations.Pop()
+ return &Env{
+ declarations: parentDecls,
+ container: e.container,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ }
+}
+
+// errorMsg is a type alias meant to represent error-based return values which
+// may be accumulated into an error at a later point in execution.
+type errorMsg string
+
+func overlappingIdentifierError(name string) errorMsg {
+ return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
+}
+
+func overlappingMacroError(name string, argCount int) errorMsg {
+ return errorMsg(fmt.Sprintf(
+ "overlapping macro for name '%s' with %d args", name, argCount))
+}
+
+func formatError(errMsgs []errorMsg) error {
+ errStrs := make([]string, 0)
+ if len(errMsgs) > 0 {
+ for i := 0; i < len(errMsgs); i++ {
+ if errMsgs[i] != "" {
+ errStrs = append(errStrs, string(errMsgs[i]))
+ }
+ }
+ }
+ if len(errStrs) > 0 {
+ return fmt.Errorf("%s", strings.Join(errStrs, "\n"))
+ }
+ return nil
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/errors.go b/hack/tools/vendor/github.com/google/cel-go/checker/errors.go
new file mode 100644
index 0000000000..8b3bf0b8b6
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/errors.go
@@ -0,0 +1,88 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+)
+
+// typeErrors is a specialization of Errors.
+type typeErrors struct {
+ errs *common.Errors
+}
+
+func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'",
+ name, FormatCELType(field), FormatCELType(value))
+}
+
+func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) {
+ e.errs.ReportErrorAtID(id, l,
+ "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
+}
+
+func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) {
+ signature := formatFunctionDeclType(nil, args, isInstance)
+ e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature)
+}
+
+func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
+ FormatCELType(t))
+}
+
+func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) {
+ e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field)
+}
+
+func (e *typeErrors) notAType(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName)
+}
+
+func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName)
+}
+
+func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) {
+ e.errs.ReportErrorAtID(id, l,
+ "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
+}
+
+func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t))
+}
+
+func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'",
+ FormatCELType(expected), FormatCELType(actual))
+}
+
+func (e *typeErrors) undefinedField(id int64, l common.Location, field string) {
+ e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field)
+}
+
+func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) {
+ e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container)
+}
+
+func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName)
+}
+
+func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName)
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/format.go b/hack/tools/vendor/github.com/google/cel-go/checker/format.go
new file mode 100644
index 0000000000..95842905e6
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/format.go
@@ -0,0 +1,216 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+const (
+ kindUnknown = iota + 1
+ kindError
+ kindFunction
+ kindDyn
+ kindPrimitive
+ kindWellKnown
+ kindWrapper
+ kindNull
+ kindAbstract
+ kindType
+ kindList
+ kindMap
+ kindObject
+ kindTypeParam
+)
+
+// FormatCheckedType converts a type message into a string representation.
+func FormatCheckedType(t *exprpb.Type) string {
+ switch kindOf(t) {
+ case kindDyn:
+ return "dyn"
+ case kindFunction:
+ return formatFunctionExprType(t.GetFunction().GetResultType(),
+ t.GetFunction().GetArgTypes(),
+ false)
+ case kindList:
+ return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
+ case kindObject:
+ return t.GetMessageType()
+ case kindMap:
+ return fmt.Sprintf("map(%s, %s)",
+ FormatCheckedType(t.GetMapType().GetKeyType()),
+ FormatCheckedType(t.GetMapType().GetValueType()))
+ case kindNull:
+ return "null"
+ case kindPrimitive:
+ switch t.GetPrimitive() {
+ case exprpb.Type_UINT64:
+ return "uint"
+ case exprpb.Type_INT64:
+ return "int"
+ }
+ return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
+ case kindType:
+ if t.GetType() == nil || t.GetType().GetTypeKind() == nil {
+ return "type"
+ }
+ return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
+ case kindWellKnown:
+ switch t.GetWellKnown() {
+ case exprpb.Type_ANY:
+ return "any"
+ case exprpb.Type_DURATION:
+ return "duration"
+ case exprpb.Type_TIMESTAMP:
+ return "timestamp"
+ }
+ case kindWrapper:
+ return fmt.Sprintf("wrapper(%s)",
+ FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper())))
+ case kindError:
+ return "!error!"
+ case kindTypeParam:
+ return t.GetTypeParam()
+ case kindAbstract:
+ at := t.GetAbstractType()
+ params := at.GetParameterTypes()
+ paramStrs := make([]string, len(params))
+ for i, p := range params {
+ paramStrs[i] = FormatCheckedType(p)
+ }
+ return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
+ }
+ return t.String()
+}
+
+type formatter func(any) string
+
+// FormatCELType formats a types.Type value to a string representation.
+//
+// The type formatting is identical to FormatCheckedType.
+func FormatCELType(t any) string {
+ dt := t.(*types.Type)
+ switch dt.Kind() {
+ case types.AnyKind:
+ return "any"
+ case types.DurationKind:
+ return "duration"
+ case types.ErrorKind:
+ return "!error!"
+ case types.NullTypeKind:
+ return "null"
+ case types.TimestampKind:
+ return "timestamp"
+ case types.TypeParamKind:
+ return dt.TypeName()
+ case types.OpaqueKind:
+ if dt.TypeName() == "function" {
+ // There is no explicit function type in the new types representation, so information like
+ // whether the function is a member function is absent.
+ return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false)
+ }
+ case types.UnspecifiedKind:
+ return ""
+ }
+ if len(dt.Parameters()) == 0 {
+ return dt.DeclaredTypeName()
+ }
+ paramTypeNames := make([]string, 0, len(dt.Parameters()))
+ for _, p := range dt.Parameters() {
+ paramTypeNames = append(paramTypeNames, FormatCELType(p))
+ }
+ return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", "))
+}
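+
+// For example (a sketch of the expected output):
+//
+//	FormatCELType(types.NewMapType(types.StringType, types.IntType)) // "map(string, int)"
+//	FormatCELType(types.DurationType)                                // "duration"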
+
+func formatExprType(t any) string {
+ if t == nil {
+ return ""
+ }
+ return FormatCheckedType(t.(*exprpb.Type))
+}
+
+func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
+ return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType)
+}
+
+func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string {
+ return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType)
+}
+
+func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string {
+ result := ""
+ if isInstance {
+ target := argTypes[0]
+ argTypes = argTypes[1:]
+ result += format(target)
+ result += "."
+ }
+ result += "("
+ for i, arg := range argTypes {
+ if i > 0 {
+ result += ", "
+ }
+ result += format(arg)
+ }
+ result += ")"
+ rt := format(resultType)
+ if rt != "" {
+ result += " -> "
+ result += rt
+ }
+ return result
+}
+
+// kindOf returns the kind of the type as defined in the checked.proto.
+func kindOf(t *exprpb.Type) int {
+ if t == nil || t.TypeKind == nil {
+ return kindUnknown
+ }
+ switch t.GetTypeKind().(type) {
+ case *exprpb.Type_Error:
+ return kindError
+ case *exprpb.Type_Function:
+ return kindFunction
+ case *exprpb.Type_Dyn:
+ return kindDyn
+ case *exprpb.Type_Primitive:
+ return kindPrimitive
+ case *exprpb.Type_WellKnown:
+ return kindWellKnown
+ case *exprpb.Type_Wrapper:
+ return kindWrapper
+ case *exprpb.Type_Null:
+ return kindNull
+ case *exprpb.Type_Type:
+ return kindType
+ case *exprpb.Type_ListType_:
+ return kindList
+ case *exprpb.Type_MapType_:
+ return kindMap
+ case *exprpb.Type_MessageType:
+ return kindObject
+ case *exprpb.Type_TypeParam:
+ return kindTypeParam
+ case *exprpb.Type_AbstractType_:
+ return kindAbstract
+ }
+ return kindUnknown
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/mapping.go b/hack/tools/vendor/github.com/google/cel-go/checker/mapping.go
new file mode 100644
index 0000000000..8163a908a5
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/mapping.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/types"
+)
+
+type mapping struct {
+ mapping map[string]*types.Type
+}
+
+func newMapping() *mapping {
+ return &mapping{
+ mapping: make(map[string]*types.Type),
+ }
+}
+
+func (m *mapping) add(from, to *types.Type) {
+ m.mapping[FormatCELType(from)] = to
+}
+
+func (m *mapping) find(from *types.Type) (*types.Type, bool) {
+ if r, found := m.mapping[FormatCELType(from)]; found {
+ return r, found
+ }
+ return nil, false
+}
+
+func (m *mapping) copy() *mapping {
+ c := newMapping()
+
+ for k, v := range m.mapping {
+ c.mapping[k] = v
+ }
+ return c
+}
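+
+// Illustrative sketch: substitutions are keyed by the formatted type name, so
+// two structurally identical type references resolve to the same entry.
+//
+//	m := newMapping()
+//	m.add(types.NewTypeParamType("T"), types.IntType)
+//	sub, found := m.find(types.NewTypeParamType("T")) // found == true, sub == types.IntType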
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/options.go b/hack/tools/vendor/github.com/google/cel-go/checker/options.go
new file mode 100644
index 0000000000..0560c3813c
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/options.go
@@ -0,0 +1,42 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+type options struct {
+ crossTypeNumericComparisons bool
+ homogeneousAggregateLiterals bool
+ validatedDeclarations *Scopes
+}
+
+// Option is a functional option for configuring the type-checker
+type Option func(*options) error
+
+// CrossTypeNumericComparisons toggles type-checker support for numeric comparisons across types.
+// See https://github.com/google/cel-spec/wiki/proposal-210 for more details.
+func CrossTypeNumericComparisons(enabled bool) Option {
+ return func(opts *options) error {
+ opts.crossTypeNumericComparisons = enabled
+ return nil
+ }
+}
+
+// ValidatedDeclarations provides a reference to validated declarations which will be copied
+// into new checker instances.
+func ValidatedDeclarations(env *Env) Option {
+ return func(opts *options) error {
+ opts.validatedDeclarations = env.validatedDeclarations()
+ return nil
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/printer.go b/hack/tools/vendor/github.com/google/cel-go/checker/printer.go
new file mode 100644
index 0000000000..7a3984f02c
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/printer.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "sort"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/debug"
+)
+
+type semanticAdorner struct {
+ checked *ast.AST
+}
+
+var _ debug.Adorner = &semanticAdorner{}
+
+func (a *semanticAdorner) GetMetadata(elem any) string {
+ result := ""
+ e, isExpr := elem.(ast.Expr)
+ if !isExpr {
+ return result
+ }
+ t := a.checked.TypeMap()[e.ID()]
+ if t != nil {
+ result += "~"
+ result += FormatCELType(t)
+ }
+
+ switch e.Kind() {
+ case ast.IdentKind,
+ ast.CallKind,
+ ast.ListKind,
+ ast.StructKind,
+ ast.SelectKind:
+ if ref, found := a.checked.ReferenceMap()[e.ID()]; found {
+ if len(ref.OverloadIDs) == 0 {
+ result += "^" + ref.Name
+ } else {
+ sort.Strings(ref.OverloadIDs)
+ for i, overload := range ref.OverloadIDs {
+ if i == 0 {
+ result += "^"
+ } else {
+ result += "|"
+ }
+ result += overload
+ }
+ }
+ }
+ }
+
+ return result
+}
+
+// Print returns a string representation of the Expr message,
+// annotated with types from the CheckedExpr. The Expr must
+// be a sub-expression embedded in the CheckedExpr.
+func Print(e ast.Expr, checked *ast.AST) string {
+ a := &semanticAdorner{checked: checked}
+ return debug.ToAdornedDebugString(e, a)
+}
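+
+// Example usage (a sketch; `checked` is assumed to be a type-checked AST):
+//
+//	out := Print(checked.Expr(), checked)
+//	// e.g. `a.b~bool^a.b` for a checked select expression, with `~` preceding
+//	// the type and `^` preceding the resolved reference.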
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/scopes.go b/hack/tools/vendor/github.com/google/cel-go/checker/scopes.go
new file mode 100644
index 0000000000..8bb73ddb6a
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/scopes.go
@@ -0,0 +1,147 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/decls"
+)
+
+// Scopes represents nested Decl sets where each Scopes value contains a Group of all
+// identifiers in scope and an optional parent representing outer scopes.
+// Each Group value is a mapping of names to Decls in the ident and function namespaces.
+// Lookups are performed such that bindings in inner scopes shadow those in outer scopes.
+type Scopes struct {
+ parent *Scopes
+ scopes *Group
+}
+
+// newScopes creates a new, empty Scopes.
+// Some operations can't be safely performed until a Group is added with Push.
+func newScopes() *Scopes {
+ return &Scopes{
+ scopes: newGroup(),
+ }
+}
+
+// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil.
+func (s *Scopes) Copy() *Scopes {
+ cpy := newScopes()
+ if s == nil {
+ return cpy
+ }
+ if s.parent != nil {
+ cpy.parent = s.parent.Copy()
+ }
+ cpy.scopes = s.scopes.copy()
+ return cpy
+}
+
+// Push creates a new Scopes value which references the current Scope as its parent.
+func (s *Scopes) Push() *Scopes {
+ return &Scopes{
+ parent: s,
+ scopes: newGroup(),
+ }
+}
+
+// Pop returns the parent Scopes value for the current scope, or the current scope if the parent
+// is nil.
+func (s *Scopes) Pop() *Scopes {
+ if s.parent != nil {
+ return s.parent
+ }
+ // TODO: Consider whether this should be an error / panic.
+ return s
+}
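+
+// Illustrative sketch of scope shadowing (the declarations are hypothetical):
+//
+//	outer := newScopes()
+//	outer.AddIdent(decls.NewVariable("x", types.IntType))
+//	inner := outer.Push()
+//	inner.AddIdent(decls.NewVariable("x", types.StringType))
+//	inner.FindIdent("x")       // resolves to the inner, string-typed declaration
+//	inner.Pop().FindIdent("x") // resolves to the outer, int-typed declaration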
+
+// AddIdent adds the ident Decl in the current scope.
+// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
+func (s *Scopes) AddIdent(decl *decls.VariableDecl) {
+ s.scopes.idents[decl.Name()] = decl
+}
+
+// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
+// found.
+// Note: The search is performed from innermost to outermost.
+func (s *Scopes) FindIdent(name string) *decls.VariableDecl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ if s.parent != nil {
+ return s.parent.FindIdent(name)
+ }
+ return nil
+}
+
+// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
+// nil if one does not exist.
+// Note: The search is only performed on the current scope and does not search outer scopes.
+func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ return nil
+}
+
+// SetFunction adds the function Decl to the current scope.
+// Note: Any previous entry for a function in the current scope with the same name is overwritten.
+func (s *Scopes) SetFunction(fn *decls.FunctionDecl) {
+ s.scopes.functions[fn.Name()] = fn
+}
+
+// FindFunction finds the first function Decl with a matching name in Scopes.
+// The search is performed from innermost to outermost.
+// Returns nil if no such function in Scopes.
+func (s *Scopes) FindFunction(name string) *decls.FunctionDecl {
+ if fn, found := s.scopes.functions[name]; found {
+ return fn
+ }
+ if s.parent != nil {
+ return s.parent.FindFunction(name)
+ }
+ return nil
+}
+
+// Group is a set of Decls that is pushed on or popped off a Scopes as a unit.
+// Contains separate namespaces for identifier and function Decls.
+// (Should be named "Scope" perhaps?)
+type Group struct {
+ idents map[string]*decls.VariableDecl
+ functions map[string]*decls.FunctionDecl
+}
+
+// copy creates a new Group instance with a shallow copy of the variables and functions.
+// If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write.
+func (g *Group) copy() *Group {
+ cpy := &Group{
+ idents: make(map[string]*decls.VariableDecl, len(g.idents)),
+ functions: make(map[string]*decls.FunctionDecl, len(g.functions)),
+ }
+ for n, id := range g.idents {
+ cpy.idents[n] = id
+ }
+ for n, fn := range g.functions {
+ cpy.functions[n] = fn
+ }
+ return cpy
+}
+
+// newGroup creates a new Group with empty maps for identifiers and functions.
+func newGroup() *Group {
+ return &Group{
+ idents: make(map[string]*decls.VariableDecl),
+ functions: make(map[string]*decls.FunctionDecl),
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/types.go b/hack/tools/vendor/github.com/google/cel-go/checker/types.go
new file mode 100644
index 0000000000..4c65b2737c
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/checker/types.go
@@ -0,0 +1,314 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/types"
+)
+
+// isDyn returns true if the input t is either type DYN or a well-known ANY message.
+func isDyn(t *types.Type) bool {
+ // Note: object type values that are well-known and map to a DYN value in practice
+ // are sanitized prior to being added to the environment.
+ switch t.Kind() {
+ case types.DynKind, types.AnyKind:
+ return true
+ default:
+ return false
+ }
+}
+
+// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
+func isDynOrError(t *types.Type) bool {
+ return isError(t) || isDyn(t)
+}
+
+func isError(t *types.Type) bool {
+ return t.Kind() == types.ErrorKind
+}
+
+func isOptional(t *types.Type) bool {
+ if t.Kind() == types.OpaqueKind {
+ return t.TypeName() == "optional_type"
+ }
+ return false
+}
+
+func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) {
+ if isOptional(t) {
+ return t.Parameters()[0], true
+ }
+ return t, false
+}
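+
+// Illustrative example: maybeUnwrapOptional on optional_type(int) yields
+// (int, true), while a non-optional type such as string is returned
+// unchanged along with false.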
+
+// isEqualOrLessSpecific checks whether one type is equal to or less specific than the other.
+// A type is less specific if it matches the other type using the DYN type.
+func isEqualOrLessSpecific(t1, t2 *types.Type) bool {
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ // The first type is less specific.
+ if isDyn(t1) || kind1 == types.TypeParamKind {
+ return true
+ }
+ // The first type is not less specific.
+ if isDyn(t2) || kind2 == types.TypeParamKind {
+ return false
+ }
+ // Types must be of the same kind to be equal.
+ if kind1 != kind2 {
+ return false
+ }
+
+ // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
+ // order to return true.
+ switch kind1 {
+ case types.OpaqueKind:
+ if t1.TypeName() != t2.TypeName() ||
+ len(t1.Parameters()) != len(t2.Parameters()) {
+ return false
+ }
+ for i, p1 := range t1.Parameters() {
+ if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) {
+ return false
+ }
+ }
+ return true
+ case types.ListKind:
+ return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0])
+ case types.MapKind:
+ return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) &&
+ isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1])
+ case types.TypeKind:
+ return true
+ default:
+ return t1.IsExactType(t2)
+ }
+}
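+
+// Illustrative example: list(dyn) is equal or less specific than list(int),
+// since dyn matches int; the reverse does not hold because int is strictly
+// more specific than dyn.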
+
+// internalIsAssignable returns true if t1 is assignable to t2.
+func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool {
+ // Process type parameters.
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ if kind2 == types.TypeParamKind {
+ // If t2 is a valid type substitution for t1, return true.
+ valid, t2HasSub := isValidTypeSubstitution(m, t1, t2)
+ if valid {
+ return true
+ }
+ // If t2 is not a valid type substitution for t1 and already has a known substitution,
+ // return false since it is not possible for t1 to be a substitution for t2.
+ if !valid && t2HasSub {
+ return false
+ }
+ // Otherwise, fall through to check whether t1 is a possible substitution for t2.
+ }
+ if kind1 == types.TypeParamKind {
+ // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the
+ // possible type substitutions have been searched in both directions.
+ valid, _ := isValidTypeSubstitution(m, t2, t1)
+ return valid
+ }
+
+ // Next check for wildcard types.
+ if isDynOrError(t1) || isDynOrError(t2) {
+ return true
+ }
+ // Preserve the nullness checks of the legacy type-checker.
+ if kind1 == types.NullTypeKind {
+ return internalIsAssignableNull(t2)
+ }
+ if kind2 == types.NullTypeKind {
+ return internalIsAssignableNull(t1)
+ }
+
+ // Test for when the types do not need to agree, but are more specific than dyn.
+ switch kind1 {
+ case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind,
+ types.AnyKind, types.DurationKind, types.TimestampKind,
+ types.StructKind:
+ // Test whether t2 is assignable from t1. The order of this check won't usually matter;
+ // however, there may be cases where type capabilities are expanded beyond what is supported
+ // in the current common/types package. For example, an interface designation for a group of
+ // Struct types.
+ return t2.IsAssignableType(t1)
+ case types.TypeKind:
+ return kind2 == types.TypeKind
+ case types.OpaqueKind, types.ListKind, types.MapKind:
+ return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() &&
+ internalIsAssignableList(m, t1.Parameters(), t2.Parameters())
+ default:
+ return false
+ }
+}
+
+// isValidTypeSubstitution returns whether t2 (or its type substitution) is a valid type
+// substitution for t1, and whether t2 has a type substitution in mapping m.
+//
+// The type t2 is a valid substitution for t1 if any of the following statements is true
+// - t2 has a type substitution (t2sub) equal to t1
+// - t2 has a type substitution (t2sub) assignable to t1
+// - t2 does not occur within t1.
+func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) {
+ // Early return if t1 and t2 are the same instance.
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ if kind1 == kind2 && t1.IsExactType(t2) {
+ return true, true
+ }
+ if t2Sub, found := m.find(t2); found {
+ // Early return if t1 and t2Sub are the same instance as otherwise the mapping
+ // might mark a type as being a substitution for itself.
+ if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) {
+ return true, true
+ }
+ // If the types are compatible, pick the more general type and return true.
+ if internalIsAssignable(m, t1, t2Sub) {
+ t2New := mostGeneral(t1, t2Sub)
+ // only update the type reference map if the target type does not occur within it.
+ if notReferencedIn(m, t2, t2New) {
+ m.add(t2, t2New)
+ }
+ // acknowledge the type agreement, and that the substitution is already tracked.
+ return true, true
+ }
+ return false, true
+ }
+ if notReferencedIn(m, t2, t1) {
+ m.add(t2, t1)
+ return true, false
+ }
+ return false, false
+}
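+
+// Illustrative example: when unifying list(T) with list(int), the element
+// check reaches isValidTypeSubstitution with t1=int and t2=T. If T is unbound
+// and does not occur within int, the mapping records T -> int and the result
+// is (true, false); a later attempt to bind T to string then fails with
+// (false, true) since T already has an incompatible substitution.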
+
+// internalIsAssignableList returns true if the element types at each index in the list are
+// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
+// assignable.
+func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool {
+ if len(l1) != len(l2) {
+ return false
+ }
+ for i, t1 := range l1 {
+ if !internalIsAssignable(m, t1, l2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// internalIsAssignableNull returns true if the type is nullable.
+func internalIsAssignableNull(t *types.Type) bool {
+ return isLegacyNullable(t) || t.IsAssignableType(types.NullType)
+}
+
+// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation.
+func isLegacyNullable(t *types.Type) bool {
+ switch t.Kind() {
+ case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind:
+ return true
+ }
+ return false
+}
+
+// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
+func isAssignable(m *mapping, t1, t2 *types.Type) *mapping {
+ mCopy := m.copy()
+ if internalIsAssignable(mCopy, t1, t2) {
+ return mCopy
+ }
+ return nil
+}
+
+// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2.
+func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping {
+ mCopy := m.copy()
+ if internalIsAssignableList(mCopy, l1, l2) {
+ return mCopy
+ }
+ return nil
+}
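+
+// Note on the design: both helpers above operate on a copy of the substitution
+// mapping so that a failed unification leaves the caller's mapping untouched;
+// the updated copy is returned only on success.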
+
+// mostGeneral returns the more general of two types which are known to unify.
+func mostGeneral(t1, t2 *types.Type) *types.Type {
+ if isEqualOrLessSpecific(t1, t2) {
+ return t1
+ }
+ return t2
+}
+
+// notReferencedIn checks whether the type doesn't appear directly or transitively within the other
+// type. This is a standard requirement for type unification, commonly referred to as the "occurs
+// check".
+func notReferencedIn(m *mapping, t, withinType *types.Type) bool {
+ if t.IsExactType(withinType) {
+ return false
+ }
+ withinKind := withinType.Kind()
+ switch withinKind {
+ case types.TypeParamKind:
+ wtSub, found := m.find(withinType)
+ if !found {
+ return true
+ }
+ return notReferencedIn(m, t, wtSub)
+ case types.OpaqueKind, types.ListKind, types.MapKind, types.TypeKind:
+ for _, pt := range withinType.Parameters() {
+ if !notReferencedIn(m, t, pt) {
+ return false
+ }
+ }
+ return true
+ default:
+ return true
+ }
+}
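+
+// Illustrative example: unifying T with list(T) must fail, as substitution
+// would never terminate (T -> list(T) -> list(list(T)) -> ...). The occurs
+// check rejects the binding because T is referenced within list(T).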
+
+// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type
+// parameters are replaced by DYN if typeParamToDyn is true.
+func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type {
+ if tSub, found := m.find(t); found {
+ return substitute(m, tSub, typeParamToDyn)
+ }
+ kind := t.Kind()
+ if typeParamToDyn && kind == types.TypeParamKind {
+ return types.DynType
+ }
+ switch kind {
+ case types.OpaqueKind:
+ return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...)
+ case types.ListKind:
+ return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn))
+ case types.MapKind:
+ return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn),
+ substitute(m, t.Parameters()[1], typeParamToDyn))
+ case types.TypeKind:
+ if len(t.Parameters()) > 0 {
+ tParam := t.Parameters()[0]
+ return types.NewTypeTypeWithParam(substitute(m, tParam, typeParamToDyn))
+ }
+ return t
+ default:
+ return t
+ }
+}
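+
+// Illustrative example: with a mapping of {T -> int}, substitute rewrites
+// map(T, list(T)) into map(int, list(int)). An unbound parameter U is left
+// as-is, or replaced with dyn when typeParamToDyn is true.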
+
+func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type {
+ subParams := make([]*types.Type, len(typeParams))
+ for i, tp := range typeParams {
+ subParams[i] = substitute(m, tp, typeParamToDyn)
+ }
+ return subParams
+}
+
+func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type {
+ return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...)
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/BUILD.bazel
new file mode 100644
index 0000000000..eef7f281be
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cost.go",
+ "error.go",
+ "errors.go",
+ "location.go",
+ "source.go",
+ ],
+ importpath = "github.com/google/cel-go/common",
+ deps = [
+ "//common/runes:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "errors_test.go",
+ "source_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
new file mode 100644
index 0000000000..9824f57a9f
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
@@ -0,0 +1,57 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "ast.go",
+ "conversion.go",
+ "expr.go",
+ "factory.go",
+ "navigable.go",
+ ],
+ importpath = "github.com/google/cel-go/common/ast",
+ deps = [
+ "//common:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "@dev_cel_expr//:expr",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "ast_test.go",
+ "conversion_test.go",
+ "expr_test.go",
+ "navigable_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//checker:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//parser:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//encoding/prototext:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/ast.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/ast.go
new file mode 100644
index 0000000000..b807669d49
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/ast.go
@@ -0,0 +1,457 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ast declares data structures useful for parsed and checked abstract syntax trees.
+package ast
+
+import (
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// AST contains a CEL-native expression and source info along with CEL-native type and reference information.
+type AST struct {
+ expr Expr
+ sourceInfo *SourceInfo
+ typeMap map[int64]*types.Type
+ refMap map[int64]*ReferenceInfo
+}
+
+// Expr returns the root ast.Expr value in the AST.
+func (a *AST) Expr() Expr {
+ if a == nil {
+ return nilExpr
+ }
+ return a.expr
+}
+
+// SourceInfo returns the source metadata associated with the parse / type-check passes.
+func (a *AST) SourceInfo() *SourceInfo {
+ if a == nil {
+ return nil
+ }
+ return a.sourceInfo
+}
+
+// GetType returns the type for the expression at the given id, if one exists, else types.DynType.
+func (a *AST) GetType(id int64) *types.Type {
+ if t, found := a.TypeMap()[id]; found {
+ return t
+ }
+ return types.DynType
+}
+
+// SetType sets the type of the expression node at the given id.
+func (a *AST) SetType(id int64, t *types.Type) {
+ if a == nil {
+ return
+ }
+ a.typeMap[id] = t
+}
+
+// TypeMap returns the map of expression ids to type-checked types.
+//
+// If the AST is not type-checked, the map will be empty.
+func (a *AST) TypeMap() map[int64]*types.Type {
+ if a == nil {
+ return map[int64]*types.Type{}
+ }
+ return a.typeMap
+}
+
+// GetOverloadIDs returns the set of overload function names for a given expression id.
+//
+// If the expression id is not a function call, or the AST is not type-checked, the result will be empty.
+func (a *AST) GetOverloadIDs(id int64) []string {
+ if ref, found := a.ReferenceMap()[id]; found {
+ return ref.OverloadIDs
+ }
+ return []string{}
+}
+
+// ReferenceMap returns the map of expression id to identifier, constant, and function references.
+func (a *AST) ReferenceMap() map[int64]*ReferenceInfo {
+ if a == nil {
+ return map[int64]*ReferenceInfo{}
+ }
+ return a.refMap
+}
+
+// SetReference adds a reference to the checked AST type map.
+func (a *AST) SetReference(id int64, r *ReferenceInfo) {
+ if a == nil {
+ return
+ }
+ a.refMap[id] = r
+}
+
+// IsChecked returns whether the AST is type-checked.
+func (a *AST) IsChecked() bool {
+ return a != nil && len(a.TypeMap()) > 0
+}
+
+// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value.
+func NewAST(e Expr, sourceInfo *SourceInfo) *AST {
+ if e == nil {
+ e = nilExpr
+ }
+ return &AST{
+ expr: e,
+ sourceInfo: sourceInfo,
+ typeMap: make(map[int64]*types.Type),
+ refMap: make(map[int64]*ReferenceInfo),
+ }
+}
+
+// NewCheckedAST wraps a parsed AST and augments it with type and reference metadata.
+func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST {
+ return &AST{
+ expr: parsed.Expr(),
+ sourceInfo: parsed.SourceInfo(),
+ typeMap: typeMap,
+ refMap: refMap,
+ }
+}
+
+// Copy creates a deep copy of the Expr and SourceInfo values in the input AST.
+//
+// Copies of the Expr value are generated using an internal default ExprFactory.
+func Copy(a *AST) *AST {
+ if a == nil {
+ return nil
+ }
+ e := defaultFactory.CopyExpr(a.expr)
+ if !a.IsChecked() {
+ return NewAST(e, CopySourceInfo(a.SourceInfo()))
+ }
+ typesCopy := make(map[int64]*types.Type, len(a.typeMap))
+ for id, t := range a.typeMap {
+ typesCopy[id] = t
+ }
+ refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap))
+ for id, r := range a.refMap {
+ refsCopy[id] = r
+ }
+ return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy)
+}
+
+// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value.
+func MaxID(a *AST) int64 {
+ visitor := &maxIDVisitor{maxID: 1}
+ PostOrderVisit(a.Expr(), visitor)
+ for id, call := range a.SourceInfo().MacroCalls() {
+ PostOrderVisit(call, visitor)
+ if id > visitor.maxID {
+ visitor.maxID = id + 1
+ }
+ }
+ return visitor.maxID + 1
+}
+
+// NewSourceInfo creates a simple SourceInfo object from an input common.Source value.
+func NewSourceInfo(src common.Source) *SourceInfo {
+ var lineOffsets []int32
+ var desc string
+ baseLine := int32(0)
+ baseCol := int32(0)
+ if src != nil {
+ desc = src.Description()
+ lineOffsets = src.LineOffsets()
+ // Determine whether the source metadata should be computed relative
+ // to a base line and column value. This can be determined by requesting
+ // the location for offset 0 from the source object.
+ if loc, found := src.OffsetLocation(0); found {
+ baseLine = int32(loc.Line()) - 1
+ baseCol = int32(loc.Column())
+ }
+ }
+ return &SourceInfo{
+ desc: desc,
+ lines: lineOffsets,
+ baseLine: baseLine,
+ baseCol: baseCol,
+ offsetRanges: make(map[int64]OffsetRange),
+ macroCalls: make(map[int64]Expr),
+ }
+}
+
+// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo.
+//
+// Copies of macro Expr values are generated using an internal default ExprFactory.
+func CopySourceInfo(info *SourceInfo) *SourceInfo {
+ if info == nil {
+ return nil
+ }
+ rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges))
+ for id, off := range info.offsetRanges {
+ rangesCopy[id] = off
+ }
+ callsCopy := make(map[int64]Expr, len(info.macroCalls))
+ for id, call := range info.macroCalls {
+ callsCopy[id] = defaultFactory.CopyExpr(call)
+ }
+ return &SourceInfo{
+ syntax: info.syntax,
+ desc: info.desc,
+ lines: info.lines,
+ baseLine: info.baseLine,
+ baseCol: info.baseCol,
+ offsetRanges: rangesCopy,
+ macroCalls: callsCopy,
+ }
+}
+
+// SourceInfo records basic information about the expression as a textual input and
+// as a parsed expression value.
+type SourceInfo struct {
+ syntax string
+ desc string
+ lines []int32
+ baseLine int32
+ baseCol int32
+ offsetRanges map[int64]OffsetRange
+ macroCalls map[int64]Expr
+}
+
+// SyntaxVersion returns the syntax version associated with the text expression.
+func (s *SourceInfo) SyntaxVersion() string {
+ if s == nil {
+ return ""
+ }
+ return s.syntax
+}
+
+// Description provides information about where the expression came from.
+func (s *SourceInfo) Description() string {
+ if s == nil {
+ return ""
+ }
+ return s.desc
+}
+
+// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear.
+func (s *SourceInfo) LineOffsets() []int32 {
+ if s == nil {
+ return []int32{}
+ }
+ return s.lines
+}
+
+// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression
+// node where the macro was inserted into the AST, and the ast.Expr value represents the original call
+// signature which was replaced.
+func (s *SourceInfo) MacroCalls() map[int64]Expr {
+ if s == nil {
+ return map[int64]Expr{}
+ }
+ return s.macroCalls
+}
+
+// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via
+// a macro replacement.
+//
+// Note, parsing options must be enabled to track macro calls before this method will return a value.
+func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) {
+ e, found := s.MacroCalls()[id]
+ return e, found
+}
+
+// SetMacroCall records a macro call at a specific location.
+func (s *SourceInfo) SetMacroCall(id int64, e Expr) {
+ if s != nil {
+ s.macroCalls[id] = e
+ }
+}
+
+// ClearMacroCall removes the macro call at the given expression id.
+func (s *SourceInfo) ClearMacroCall(id int64) {
+ if s != nil {
+ delete(s.macroCalls, id)
+ }
+}
+
+// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either:
+// the start and end position in the input stream where the expression occurs, or the start position
+// only. If the range only captures the start position, the stop position of the range will be equal to
+// the start.
+func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange {
+ if s == nil {
+ return map[int64]OffsetRange{}
+ }
+ return s.offsetRanges
+}
+
+// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists.
+func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) {
+ if s == nil {
+ return OffsetRange{}, false
+ }
+ o, found := s.offsetRanges[id]
+ return o, found
+}
+
+// SetOffsetRange sets the OffsetRange for the given expression id.
+func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) {
+ if s == nil {
+ return
+ }
+ s.offsetRanges[id] = o
+}
+
+// ClearOffsetRange removes the OffsetRange for the given expression id.
+func (s *SourceInfo) ClearOffsetRange(id int64) {
+ if s != nil {
+ delete(s.offsetRanges, id)
+ }
+}
+
+// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character
+// of the expression node at the id.
+func (s *SourceInfo) GetStartLocation(id int64) common.Location {
+ if o, found := s.GetOffsetRange(id); found {
+ return s.GetLocationByOffset(o.Start)
+ }
+ return common.NoLocation
+}
+
+// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for
+// the expression node at the given id.
+//
+// If the SourceInfo was generated from a serialized protobuf representation, the stop location will
+// be identical to the start location for the expression.
+func (s *SourceInfo) GetStopLocation(id int64) common.Location {
+ if o, found := s.GetOffsetRange(id); found {
+ return s.GetLocationByOffset(o.Stop)
+ }
+ return common.NoLocation
+}
+
+// GetLocationByOffset returns the line and column information for a given character offset.
+func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location {
+ line := 1
+ col := int(offset)
+ for _, lineOffset := range s.LineOffsets() {
+ if lineOffset > offset {
+ break
+ }
+ line++
+ col = int(offset - lineOffset)
+ }
+ return common.NewLocation(line, col)
+}
+
+// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column.
+func (s *SourceInfo) ComputeOffset(line, col int32) int32 {
+ if s != nil {
+ line = s.baseLine + line
+ col = s.baseCol + col
+ }
+ if line == 1 {
+ return col
+ }
+ if line < 1 || line > int32(len(s.LineOffsets())) {
+ return -1
+ }
+ offset := s.LineOffsets()[line-2]
+ return offset + col
+}
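+
+// Illustrative example: for the source text "hello\nworld\nfoo", the computed
+// line offsets are [6, 12, 16] (one entry per line, just past its end).
+// GetLocationByOffset(8) then yields line 2, column 2 (the 'r' in "world"),
+// and ComputeOffset(2, 2) inverts the calculation back to offset 8, assuming
+// a zero base line and column.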
+
+// OffsetRange captures the start and stop positions of a section of text in the input expression.
+type OffsetRange struct {
+ Start int32
+ Stop int32
+}
+
+// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to
+// either a qualified identifier name, a set of overload ids, or a constant value from an enum.
+type ReferenceInfo struct {
+ Name string
+ OverloadIDs []string
+ Value ref.Val
+}
+
+// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value.
+func NewIdentReference(name string, value ref.Val) *ReferenceInfo {
+ return &ReferenceInfo{Name: name, Value: value}
+}
+
+// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads.
+func NewFunctionReference(overloads ...string) *ReferenceInfo {
+ info := &ReferenceInfo{}
+ for _, id := range overloads {
+ info.AddOverload(id)
+ }
+ return info
+}
+
+// AddOverload appends a function overload ID to the ReferenceInfo.
+func (r *ReferenceInfo) AddOverload(overloadID string) {
+ for _, id := range r.OverloadIDs {
+ if id == overloadID {
+ return
+ }
+ }
+ r.OverloadIDs = append(r.OverloadIDs, overloadID)
+}
+
+// Equals returns whether two references are identical to each other.
+func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool {
+ if r.Name != other.Name {
+ return false
+ }
+ if len(r.OverloadIDs) != len(other.OverloadIDs) {
+ return false
+ }
+ if len(r.OverloadIDs) != 0 {
+ overloadMap := make(map[string]struct{}, len(r.OverloadIDs))
+ for _, id := range r.OverloadIDs {
+ overloadMap[id] = struct{}{}
+ }
+ for _, id := range other.OverloadIDs {
+ _, found := overloadMap[id]
+ if !found {
+ return false
+ }
+ }
+ }
+ if r.Value == nil && other.Value == nil {
+ return true
+ }
+ if r.Value == nil && other.Value != nil ||
+ r.Value != nil && other.Value == nil ||
+ r.Value.Equal(other.Value) != types.True {
+ return false
+ }
+ return true
+}
+
+type maxIDVisitor struct {
+ maxID int64
+ *baseVisitor
+}
+
+// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed.
+func (v *maxIDVisitor) VisitExpr(e Expr) {
+ if v.maxID < e.ID() {
+ v.maxID = e.ID()
+ }
+}
+
+// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed.
+func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) {
+ if v.maxID < e.ID() {
+ v.maxID = e.ID()
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/conversion.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/conversion.go
new file mode 100644
index 0000000000..435d8f6547
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/conversion.go
@@ -0,0 +1,659 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// ToProto converts an AST to a CheckedExpr protobuf.
+func ToProto(ast *AST) (*exprpb.CheckedExpr, error) {
+ refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap()))
+ for id, ref := range ast.ReferenceMap() {
+ r, err := ReferenceInfoToProto(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap()))
+ for id, typ := range ast.TypeMap() {
+ t, err := types.TypeToExprType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ e, err := ExprToProto(ast.Expr())
+ if err != nil {
+ return nil, err
+ }
+ info, err := SourceInfoToProto(ast.SourceInfo())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.CheckedExpr{
+ Expr: e,
+ SourceInfo: info,
+ ReferenceMap: refMap,
+ TypeMap: typeMap,
+ }, nil
+}
+
+// ToAST converts a CheckedExpr protobuf to an AST instance.
+func ToAST(checked *exprpb.CheckedExpr) (*AST, error) {
+ refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
+ for id, ref := range checked.GetReferenceMap() {
+ r, err := ProtoToReferenceInfo(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
+ for id, typ := range checked.GetTypeMap() {
+ t, err := types.ExprTypeToType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ info, err := ProtoToSourceInfo(checked.GetSourceInfo())
+ if err != nil {
+ return nil, err
+ }
+ root, err := ProtoToExpr(checked.GetExpr())
+ if err != nil {
+ return nil, err
+ }
+ ast := NewCheckedAST(NewAST(root, info), typeMap, refMap)
+ return ast, nil
+}
+
+// ProtoToExpr converts a protobuf Expr value to an ast.Expr value.
+func ProtoToExpr(e *exprpb.Expr) (Expr, error) {
+ factory := NewExprFactory()
+ return exprInternal(factory, e)
+}
+
+// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr
+func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ factory := NewExprFactory()
+ switch e.GetKeyKind().(type) {
+ case *exprpb.Expr_CreateStruct_Entry_FieldKey:
+ return exprStructField(factory, e.GetId(), e)
+ case *exprpb.Expr_CreateStruct_Entry_MapKey:
+ return exprMapEntry(factory, e.GetId(), e)
+ }
+ return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
+}
+
+func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) {
+ id := e.GetId()
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
+ return exprCall(factory, id, e.GetCallExpr())
+ case *exprpb.Expr_ComprehensionExpr:
+ return exprComprehension(factory, id, e.GetComprehensionExpr())
+ case *exprpb.Expr_ConstExpr:
+ return exprLiteral(factory, id, e.GetConstExpr())
+ case *exprpb.Expr_IdentExpr:
+ return exprIdent(factory, id, e.GetIdentExpr())
+ case *exprpb.Expr_ListExpr:
+ return exprList(factory, id, e.GetListExpr())
+ case *exprpb.Expr_SelectExpr:
+ return exprSelect(factory, id, e.GetSelectExpr())
+ case *exprpb.Expr_StructExpr:
+ s := e.GetStructExpr()
+ if s.GetMessageName() != "" {
+ return exprStruct(factory, id, s)
+ }
+ return exprMap(factory, id, s)
+ }
+ return factory.NewUnspecifiedExpr(id), nil
+}
+
+func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) {
+ var err error
+ args := make([]Expr, len(call.GetArgs()))
+ for i, a := range call.GetArgs() {
+ args[i], err = exprInternal(factory, a)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if call.GetTarget() == nil {
+ return factory.NewCall(id, call.GetFunction(), args...), nil
+ }
+
+ target, err := exprInternal(factory, call.GetTarget())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil
+}
+
+func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) {
+ iterRange, err := exprInternal(factory, comp.GetIterRange())
+ if err != nil {
+ return nil, err
+ }
+ accuInit, err := exprInternal(factory, comp.GetAccuInit())
+ if err != nil {
+ return nil, err
+ }
+ loopCond, err := exprInternal(factory, comp.GetLoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ loopStep, err := exprInternal(factory, comp.GetLoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := exprInternal(factory, comp.GetResult())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewComprehensionTwoVar(id,
+ iterRange,
+ comp.GetIterVar(),
+ comp.GetIterVar2(),
+ comp.GetAccuVar(),
+ accuInit,
+ loopCond,
+ loopStep,
+ result), nil
+}
+
+func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) {
+ val, err := ConstantToVal(c)
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewLiteral(id, val), nil
+}
+
+func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) {
+ return factory.NewIdent(id, i.GetName()), nil
+}
+
+func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) {
+ elems := make([]Expr, len(l.GetElements()))
+ for i, e := range l.GetElements() {
+ elem, err := exprInternal(factory, e)
+ if err != nil {
+ return nil, err
+ }
+ elems[i] = elem
+ }
+ return factory.NewList(id, elems, l.GetOptionalIndices()), nil
+}
+
+func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
+ entries := make([]EntryExpr, len(s.GetEntries()))
+ var err error
+ for i, entry := range s.GetEntries() {
+ entries[i], err = exprMapEntry(factory, entry.GetId(), entry)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return factory.NewMap(id, entries), nil
+}
+
+func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ k, err := exprInternal(factory, e.GetMapKey())
+ if err != nil {
+ return nil, err
+ }
+ v, err := exprInternal(factory, e.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil
+}
+
+func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) {
+ op, err := exprInternal(factory, s.GetOperand())
+ if err != nil {
+ return nil, err
+ }
+ if s.GetTestOnly() {
+ return factory.NewPresenceTest(id, op, s.GetField()), nil
+ }
+ return factory.NewSelect(id, op, s.GetField()), nil
+}
+
+func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
+ fields := make([]EntryExpr, len(s.GetEntries()))
+ var err error
+ for i, field := range s.GetEntries() {
+ fields[i], err = exprStructField(factory, field.GetId(), field)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return factory.NewStruct(id, s.GetMessageName(), fields), nil
+}
+
+func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ v, err := exprInternal(factory, f.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil
+}
+
+// ExprToProto serializes an ast.Expr value to a protobuf Expr representation.
+func ExprToProto(e Expr) (*exprpb.Expr, error) {
+ if e == nil {
+ return &exprpb.Expr{}, nil
+ }
+ switch e.Kind() {
+ case CallKind:
+ return protoCall(e.ID(), e.AsCall())
+ case ComprehensionKind:
+ return protoComprehension(e.ID(), e.AsComprehension())
+ case IdentKind:
+ return protoIdent(e.ID(), e.AsIdent())
+ case ListKind:
+ return protoList(e.ID(), e.AsList())
+ case LiteralKind:
+ return protoLiteral(e.ID(), e.AsLiteral())
+ case MapKind:
+ return protoMap(e.ID(), e.AsMap())
+ case SelectKind:
+ return protoSelect(e.ID(), e.AsSelect())
+ case StructKind:
+ return protoStruct(e.ID(), e.AsStruct())
+ case UnspecifiedExprKind:
+ // Handle the case where a macro reference may be getting translated.
+ // A nested macro 'pointer' is a non-zero expression id with no kind set.
+ if e.ID() != 0 {
+ return &exprpb.Expr{Id: e.ID()}, nil
+ }
+ return &exprpb.Expr{}, nil
+ }
+ return nil, fmt.Errorf("unsupported expr kind: %v", e)
+}
+
+// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry
+func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) {
+ switch e.Kind() {
+ case MapEntryKind:
+ return protoMapEntry(e.ID(), e.AsMapEntry())
+ case StructFieldKind:
+ return protoStructField(e.ID(), e.AsStructField())
+ case UnspecifiedEntryExprKind:
+ return &exprpb.Expr_CreateStruct_Entry{}, nil
+ }
+ return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
+}
+
+func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) {
+ var err error
+ var target *exprpb.Expr
+ if call.IsMemberFunction() {
+ target, err = ExprToProto(call.Target())
+ if err != nil {
+ return nil, err
+ }
+ }
+ callArgs := call.Args()
+ args := make([]*exprpb.Expr, len(callArgs))
+ for i, a := range callArgs {
+ args[i], err = ExprToProto(a)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: call.FunctionName(),
+ Target: target,
+ Args: args,
+ },
+ },
+ }, nil
+}
+
+func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) {
+ iterRange, err := ExprToProto(comp.IterRange())
+ if err != nil {
+ return nil, err
+ }
+ accuInit, err := ExprToProto(comp.AccuInit())
+ if err != nil {
+ return nil, err
+ }
+ loopCond, err := ExprToProto(comp.LoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ loopStep, err := ExprToProto(comp.LoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := ExprToProto(comp.Result())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ IterVar: comp.IterVar(),
+ IterVar2: comp.IterVar2(),
+ IterRange: iterRange,
+ AccuVar: comp.AccuVar(),
+ AccuInit: accuInit,
+ LoopCondition: loopCond,
+ LoopStep: loopStep,
+ Result: result,
+ },
+ },
+ }, nil
+}
+
+func protoIdent(id int64, name string) (*exprpb.Expr, error) {
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_IdentExpr{
+ IdentExpr: &exprpb.Expr_Ident{
+ Name: name,
+ },
+ },
+ }, nil
+}
+
+func protoList(id int64, list ListExpr) (*exprpb.Expr, error) {
+ var err error
+ elems := make([]*exprpb.Expr, list.Size())
+ for i, e := range list.Elements() {
+ elems[i], err = ExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: elems,
+ OptionalIndices: list.OptionalIndices(),
+ },
+ },
+ }, nil
+}
+
+func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) {
+ c, err := ValToConstant(val)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ConstExpr{
+ ConstExpr: c,
+ },
+ }, nil
+}
+
+func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) {
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries()))
+ var err error
+ for i, e := range m.Entries() {
+ entries[i], err = EntryExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ Entries: entries,
+ },
+ },
+ }, nil
+}
+
+func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) {
+ k, err := ExprToProto(e.Key())
+ if err != nil {
+ return nil, err
+ }
+ v, err := ExprToProto(e.Value())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: id,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: k,
+ },
+ Value: v,
+ OptionalEntry: e.IsOptional(),
+ }, nil
+}
+
+func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) {
+ op, err := ExprToProto(s.Operand())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{
+ Operand: op,
+ Field: s.FieldName(),
+ TestOnly: s.IsTestOnly(),
+ },
+ },
+ }, nil
+}
+
+func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) {
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields()))
+ var err error
+ for i, e := range s.Fields() {
+ entries[i], err = EntryExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: s.TypeName(),
+ Entries: entries,
+ },
+ },
+ }, nil
+}
+
+func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) {
+ v, err := ExprToProto(f.Value())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: id,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{
+ FieldKey: f.Name(),
+ },
+ Value: v,
+ OptionalEntry: f.IsOptional(),
+ }, nil
+}
+
+// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object.
+func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) {
+ if info == nil {
+ return &exprpb.SourceInfo{}, nil
+ }
+ sourceInfo := &exprpb.SourceInfo{
+ SyntaxVersion: info.SyntaxVersion(),
+ Location: info.Description(),
+ LineOffsets: info.LineOffsets(),
+ Positions: make(map[int64]int32, len(info.OffsetRanges())),
+ MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())),
+ }
+ for id, offset := range info.OffsetRanges() {
+ sourceInfo.Positions[id] = offset.Start
+ }
+ for id, e := range info.MacroCalls() {
+ call, err := ExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ sourceInfo.MacroCalls[id] = call
+ }
+ return sourceInfo, nil
+}
+
+// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value.
+func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) {
+ sourceInfo := &SourceInfo{
+ syntax: info.GetSyntaxVersion(),
+ desc: info.GetLocation(),
+ lines: info.GetLineOffsets(),
+ offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())),
+ macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())),
+ }
+ for id, offset := range info.GetPositions() {
+ sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset})
+ }
+ for id, e := range info.GetMacroCalls() {
+ call, err := ProtoToExpr(e)
+ if err != nil {
+ return nil, err
+ }
+ sourceInfo.SetMacroCall(id, call)
+ }
+ return sourceInfo, nil
+}
+
+// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
+func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) {
+ c, err := ValToConstant(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Reference{
+ Name: info.Name,
+ OverloadId: info.OverloadIDs,
+ Value: c,
+ }, nil
+}
+
+// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
+func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
+ v, err := ConstantToVal(ref.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return &ReferenceInfo{
+ Name: ref.GetName(),
+ OverloadIDs: ref.GetOverloadId(),
+ Value: v,
+ }, nil
+}
+
+// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
+//
+// Only simple scalar types are supported by this method.
+func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
+ if v == nil {
+ return nil, nil
+ }
+ switch v.Type() {
+ case types.BoolType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
+ case types.BytesType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
+ case types.IntType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
+ case types.NullType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
+ case types.StringType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
+ case types.UintType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
+}
+
+// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
+func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
+ return AlphaProtoConstantAsVal(c)
+}
+
+// AlphaProtoConstantAsVal converts a v1alpha1.Constant protobuf to a CEL-native ref.Val.
+func AlphaProtoConstantAsVal(c *exprpb.Constant) (ref.Val, error) {
+ if c == nil {
+ return nil, nil
+ }
+ canonical := &celpb.Constant{}
+ if err := convertProto(c, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoConstantAsVal(canonical)
+}
+
+// ProtoConstantAsVal converts a canonical celpb.Constant protobuf to a CEL-native ref.Val.
+func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) {
+ switch c.GetConstantKind().(type) {
+ case *celpb.Constant_BoolValue:
+ return types.Bool(c.GetBoolValue()), nil
+ case *celpb.Constant_BytesValue:
+ return types.Bytes(c.GetBytesValue()), nil
+ case *celpb.Constant_DoubleValue:
+ return types.Double(c.GetDoubleValue()), nil
+ case *celpb.Constant_Int64Value:
+ return types.Int(c.GetInt64Value()), nil
+ case *celpb.Constant_NullValue:
+ return types.NullValue, nil
+ case *celpb.Constant_StringValue:
+ return types.String(c.GetStringValue()), nil
+ case *celpb.Constant_Uint64Value:
+ return types.Uint(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
+}
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
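+
+// Note: convertProto bridges the v1alpha1 and canonical cel.dev/expr message
+// types by round-tripping through the binary wire format, which is valid
+// because the two message definitions share the same field numbers and types.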
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/expr.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/expr.go
new file mode 100644
index 0000000000..9f55cb3b9f
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/expr.go
@@ -0,0 +1,884 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// ExprKind represents the expression node kind.
+type ExprKind int
+
+const (
+ // UnspecifiedExprKind represents an unset expression with no specified properties.
+ UnspecifiedExprKind ExprKind = iota
+
+ // CallKind represents a function call.
+ CallKind
+
+ // ComprehensionKind represents a comprehension expression generated by a macro.
+ ComprehensionKind
+
+ // IdentKind represents a simple variable, constant, or type identifier.
+ IdentKind
+
+ // ListKind represents a list literal expression.
+ ListKind
+
+ // LiteralKind represents a primitive scalar literal.
+ LiteralKind
+
+ // MapKind represents a map literal expression.
+ MapKind
+
+ // SelectKind represents a field selection expression.
+ SelectKind
+
+ // StructKind represents a struct literal expression.
+ StructKind
+)
+
+// Expr represents the base expression node in a CEL abstract syntax tree.
+//
+// Depending on the `Kind()` value, the Expr may be converted to a concrete expression type
+// as indicated by the `As` methods.
+type Expr interface {
+ // ID of the expression as it appears in the AST
+ ID() int64
+
+ // Kind of the expression node. See ExprKind for the valid enum values.
+ Kind() ExprKind
+
+ // AsCall adapts the expr into a CallExpr
+ //
+ // The Kind() must be equal to a CallKind for the conversion to be well-defined.
+ AsCall() CallExpr
+
+ // AsComprehension adapts the expr into a ComprehensionExpr.
+ //
+ // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
+ AsComprehension() ComprehensionExpr
+
+ // AsIdent adapts the expr into an identifier string.
+ //
+ // The Kind() must be equal to an IdentKind for the conversion to be well-defined.
+ AsIdent() string
+
+ // AsLiteral adapts the expr into a constant ref.Val.
+ //
+ // The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
+ AsLiteral() ref.Val
+
+ // AsList adapts the expr into a ListExpr.
+ //
+ // The Kind() must be equal to a ListKind for the conversion to be well-defined.
+ AsList() ListExpr
+
+ // AsMap adapts the expr into a MapExpr.
+ //
+ // The Kind() must be equal to a MapKind for the conversion to be well-defined.
+ AsMap() MapExpr
+
+ // AsSelect adapts the expr into a SelectExpr.
+ //
+ // The Kind() must be equal to a SelectKind for the conversion to be well-defined.
+ AsSelect() SelectExpr
+
+ // AsStruct adapts the expr into a StructExpr.
+ //
+ // The Kind() must be equal to a StructKind for the conversion to be well-defined.
+ AsStruct() StructExpr
+
+ // RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+ RenumberIDs(IDGenerator)
+
+ // SetKindCase replaces the contents of the current expression with the contents of the other.
+ //
+ // SetKindCase takes ownership of any expression instances referenced within the input Expr.
+ // A shallow copy is made of the Expr value itself, but not a deep one.
+ //
+ // This method should only be used during AST rewrites using temporary Expr values.
+ SetKindCase(Expr)
+
+ // isExpr is a marker interface.
+ isExpr()
+}
+
+// EntryExprKind represents the possible EntryExpr kinds.
+type EntryExprKind int
+
+const (
+ // UnspecifiedEntryExprKind indicates that the entry expr is not set.
+ UnspecifiedEntryExprKind EntryExprKind = iota
+
+ // MapEntryKind indicates that the entry is a MapEntry type with key and value expressions.
+ MapEntryKind
+
+ // StructFieldKind indicates that the entry is a StructField with a field name and initializer
+ // expression.
+ StructFieldKind
+)
+
+// EntryExpr represents the base entry expression in a CEL map or struct literal.
+type EntryExpr interface {
+ // ID of the entry as it appears in the AST.
+ ID() int64
+
+ // Kind of the entry expression node. See EntryExprKind for valid enum values.
+ Kind() EntryExprKind
+
+ // AsMapEntry casts the EntryExpr to a MapEntry.
+ //
+ // The Kind() must be equal to MapEntryKind for the conversion to be well-defined.
+ AsMapEntry() MapEntry
+
+ // AsStructField casts the EntryExpr to a StructField
+ //
+ // The Kind() must be equal to StructFieldKind for the conversion to be well-defined.
+ AsStructField() StructField
+
+ // RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+ RenumberIDs(IDGenerator)
+
+ isEntryExpr()
+}
+
+// IDGenerator produces unique ids suitable for tagging expression nodes
+type IDGenerator func(originalID int64) int64
+
+// CallExpr defines an interface for inspecting a function call and its arguments.
+type CallExpr interface {
+ // FunctionName returns the name of the function.
+ FunctionName() string
+
+ // IsMemberFunction returns whether the call has a non-nil target, indicating it is a member function.
+ IsMemberFunction() bool
+
+ // Target returns the target of the expression if one is present.
+ Target() Expr
+
+ // Args returns the list of call arguments, excluding the target.
+ Args() []Expr
+
+ // marker interface method
+ isExpr()
+}
+
+// ListExpr defines an interface for inspecting a list literal expression.
+type ListExpr interface {
+ // Elements returns the list elements as navigable expressions.
+ Elements() []Expr
+
+ // OptionalIndices returns the list of optional indices in the list literal.
+ OptionalIndices() []int32
+
+ // IsOptional indicates whether the given element index is optional.
+ IsOptional(int32) bool
+
+ // Size returns the number of elements in the list.
+ Size() int
+
+ // marker interface method
+ isExpr()
+}
+
+// SelectExpr defines an interface for inspecting a select expression.
+type SelectExpr interface {
+ // Operand returns the selection operand expression.
+ Operand() Expr
+
+ // FieldName returns the field name being selected from the operand.
+ FieldName() string
+
+ // IsTestOnly indicates whether the select expression is a presence test generated by a macro.
+ IsTestOnly() bool
+
+ // marker interface method
+ isExpr()
+}
+
+// MapExpr defines an interface for inspecting a map expression.
+type MapExpr interface {
+ // Entries returns the map key value pairs as EntryExpr values.
+ Entries() []EntryExpr
+
+ // Size returns the number of entries in the map.
+ Size() int
+
+ // marker interface method
+ isExpr()
+}
+
+// MapEntry defines an interface for inspecting a map entry.
+type MapEntry interface {
+ // Key returns the map entry key expression.
+ Key() Expr
+
+ // Value returns the map entry value expression.
+ Value() Expr
+
+ // IsOptional returns whether the entry is optional.
+ IsOptional() bool
+
+ // marker interface method
+ isEntryExpr()
+}
+
+// StructExpr defines an interface for inspecting a struct and its field initializers.
+type StructExpr interface {
+ // TypeName returns the struct type name.
+ TypeName() string
+
+ // Fields returns the set of field initializers in the struct expression as EntryExpr values.
+ Fields() []EntryExpr
+
+ // marker interface method
+ isExpr()
+}
+
+// StructField defines an interface for inspecting a struct field initialization.
+type StructField interface {
+ // Name returns the name of the field.
+ Name() string
+
+ // Value returns the field initialization expression.
+ Value() Expr
+
+ // IsOptional returns whether the field is optional.
+ IsOptional() bool
+
+ // marker interface method
+ isEntryExpr()
+}
+
+// ComprehensionExpr defines an interface for inspecting a comprehension expression.
+type ComprehensionExpr interface {
+ // IterRange returns the iteration range expression.
+ IterRange() Expr
+
+ // IterVar returns the iteration variable name.
+ //
+ // For one-variable comprehensions, the iter var refers to the element value
+ // when iterating over a list, or the map key when iterating over a map.
+ //
+ // For two-variable comprehensions, the iter var refers to the list index or the
+ // map key.
+ IterVar() string
+
+ // IterVar2 returns the second iteration variable name.
+ //
+ // When the value is non-empty, the comprehension is a two-variable comprehension.
+ IterVar2() string
+
+ // HasIterVar2 returns true if the second iteration variable is non-empty.
+ HasIterVar2() bool
+
+ // AccuVar returns the accumulation variable name.
+ AccuVar() string
+
+ // AccuInit returns the accumulation variable initialization expression.
+ AccuInit() Expr
+
+ // LoopCondition returns the loop condition expression.
+ LoopCondition() Expr
+
+ // LoopStep returns the loop step expression.
+ LoopStep() Expr
+
+ // Result returns the comprehension result expression.
+ Result() Expr
+
+ // marker interface method
+ isExpr()
+}
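+
+// Roughly illustrative example (the exact expansion is macro-dependent): the
+// CEL expression `[1, 2].all(x, x > 0)` parses into a comprehension whose
+// iterRange is the list [1, 2], with iterVar "x", an accumulator initialized
+// to true, a loop step that ANDs the predicate into the accumulator, and a
+// result that returns the accumulator.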
+
+var _ Expr = &expr{}
+
+type expr struct {
+ id int64
+ exprKindCase
+}
+
+type exprKindCase interface {
+ Kind() ExprKind
+
+ renumberIDs(IDGenerator)
+
+ isExpr()
+}
+
+func (e *expr) ID() int64 {
+ if e == nil {
+ return 0
+ }
+ return e.id
+}
+
+func (e *expr) Kind() ExprKind {
+ if e == nil || e.exprKindCase == nil {
+ return UnspecifiedExprKind
+ }
+ return e.exprKindCase.Kind()
+}
+
+func (e *expr) AsCall() CallExpr {
+ if e.Kind() != CallKind {
+ return nilCall
+ }
+ return e.exprKindCase.(CallExpr)
+}
+
+func (e *expr) AsComprehension() ComprehensionExpr {
+ if e.Kind() != ComprehensionKind {
+ return nilCompre
+ }
+ return e.exprKindCase.(ComprehensionExpr)
+}
+
+func (e *expr) AsIdent() string {
+ if e.Kind() != IdentKind {
+ return ""
+ }
+ return string(e.exprKindCase.(baseIdentExpr))
+}
+
+func (e *expr) AsLiteral() ref.Val {
+ if e.Kind() != LiteralKind {
+ return nil
+ }
+ return e.exprKindCase.(*baseLiteral).Val
+}
+
+func (e *expr) AsList() ListExpr {
+ if e.Kind() != ListKind {
+ return nilList
+ }
+ return e.exprKindCase.(ListExpr)
+}
+
+func (e *expr) AsMap() MapExpr {
+ if e.Kind() != MapKind {
+ return nilMap
+ }
+ return e.exprKindCase.(MapExpr)
+}
+
+func (e *expr) AsSelect() SelectExpr {
+ if e.Kind() != SelectKind {
+ return nilSel
+ }
+ return e.exprKindCase.(SelectExpr)
+}
+
+func (e *expr) AsStruct() StructExpr {
+ if e.Kind() != StructKind {
+ return nilStruct
+ }
+ return e.exprKindCase.(StructExpr)
+}
+
+func (e *expr) SetKindCase(other Expr) {
+ if e == nil {
+ return
+ }
+ if other == nil {
+ e.exprKindCase = nil
+ return
+ }
+ switch other.Kind() {
+ case CallKind:
+ c := other.AsCall()
+ e.exprKindCase = &baseCallExpr{
+ function: c.FunctionName(),
+ target: c.Target(),
+ args: c.Args(),
+ isMember: c.IsMemberFunction(),
+ }
+ case ComprehensionKind:
+ c := other.AsComprehension()
+ e.exprKindCase = &baseComprehensionExpr{
+ iterRange: c.IterRange(),
+ iterVar: c.IterVar(),
+ iterVar2: c.IterVar2(),
+ accuVar: c.AccuVar(),
+ accuInit: c.AccuInit(),
+ loopCond: c.LoopCondition(),
+ loopStep: c.LoopStep(),
+ result: c.Result(),
+ }
+ case IdentKind:
+ e.exprKindCase = baseIdentExpr(other.AsIdent())
+ case ListKind:
+ l := other.AsList()
+ optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices()))
+ for _, idx := range l.OptionalIndices() {
+ optIndexMap[idx] = struct{}{}
+ }
+ e.exprKindCase = &baseListExpr{
+ elements: l.Elements(),
+ optIndices: l.OptionalIndices(),
+ optIndexMap: optIndexMap,
+ }
+ case LiteralKind:
+ e.exprKindCase = &baseLiteral{Val: other.AsLiteral()}
+ case MapKind:
+ e.exprKindCase = &baseMapExpr{
+ entries: other.AsMap().Entries(),
+ }
+ case SelectKind:
+ s := other.AsSelect()
+ e.exprKindCase = &baseSelectExpr{
+ operand: s.Operand(),
+ field: s.FieldName(),
+ testOnly: s.IsTestOnly(),
+ }
+ case StructKind:
+ s := other.AsStruct()
+ e.exprKindCase = &baseStructExpr{
+ typeName: s.TypeName(),
+ fields: s.Fields(),
+ }
+ case UnspecifiedExprKind:
+ e.exprKindCase = nil
+ }
+}
+
+func (e *expr) RenumberIDs(idGen IDGenerator) {
+ if e == nil {
+ return
+ }
+ e.id = idGen(e.id)
+ if e.exprKindCase != nil {
+ e.exprKindCase.renumberIDs(idGen)
+ }
+}
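+
+// Illustrative usage (assumption): RenumberIDs applies the generator to this node
+// and all of its descendants, so a stable offset can be applied in one call:
+//
+//	e.RenumberIDs(func(id int64) int64 { return id + 1000 })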
+
+type baseCallExpr struct {
+ function string
+ target Expr
+ args []Expr
+ isMember bool
+}
+
+func (*baseCallExpr) Kind() ExprKind {
+ return CallKind
+}
+
+func (e *baseCallExpr) FunctionName() string {
+ if e == nil {
+ return ""
+ }
+ return e.function
+}
+
+func (e *baseCallExpr) IsMemberFunction() bool {
+ if e == nil {
+ return false
+ }
+ return e.isMember
+}
+
+func (e *baseCallExpr) Target() Expr {
+ if e == nil || !e.IsMemberFunction() {
+ return nilExpr
+ }
+ return e.target
+}
+
+func (e *baseCallExpr) Args() []Expr {
+ if e == nil {
+ return []Expr{}
+ }
+ return e.args
+}
+
+func (e *baseCallExpr) renumberIDs(idGen IDGenerator) {
+ if e.IsMemberFunction() {
+ e.Target().RenumberIDs(idGen)
+ }
+ for _, arg := range e.Args() {
+ arg.RenumberIDs(idGen)
+ }
+}
+
+func (*baseCallExpr) isExpr() {}
+
+var _ ComprehensionExpr = &baseComprehensionExpr{}
+
+type baseComprehensionExpr struct {
+ iterRange Expr
+ iterVar string
+ iterVar2 string
+ accuVar string
+ accuInit Expr
+ loopCond Expr
+ loopStep Expr
+ result Expr
+}
+
+func (*baseComprehensionExpr) Kind() ExprKind {
+ return ComprehensionKind
+}
+
+func (e *baseComprehensionExpr) IterRange() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.iterRange
+}
+
+func (e *baseComprehensionExpr) IterVar() string {
+ return e.iterVar
+}
+
+func (e *baseComprehensionExpr) IterVar2() string {
+ return e.iterVar2
+}
+
+func (e *baseComprehensionExpr) HasIterVar2() bool {
+ return e.iterVar2 != ""
+}
+
+func (e *baseComprehensionExpr) AccuVar() string {
+ return e.accuVar
+}
+
+func (e *baseComprehensionExpr) AccuInit() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.accuInit
+}
+
+func (e *baseComprehensionExpr) LoopCondition() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.loopCond
+}
+
+func (e *baseComprehensionExpr) LoopStep() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.loopStep
+}
+
+func (e *baseComprehensionExpr) Result() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.result
+}
+
+func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) {
+ e.IterRange().RenumberIDs(idGen)
+ e.AccuInit().RenumberIDs(idGen)
+ e.LoopCondition().RenumberIDs(idGen)
+ e.LoopStep().RenumberIDs(idGen)
+ e.Result().RenumberIDs(idGen)
+}
+
+func (*baseComprehensionExpr) isExpr() {}
+
+var _ exprKindCase = baseIdentExpr("")
+
+type baseIdentExpr string
+
+func (baseIdentExpr) Kind() ExprKind {
+ return IdentKind
+}
+
+func (e baseIdentExpr) renumberIDs(IDGenerator) {}
+
+func (baseIdentExpr) isExpr() {}
+
+var _ exprKindCase = &baseLiteral{}
+var _ ref.Val = &baseLiteral{}
+
+type baseLiteral struct {
+ ref.Val
+}
+
+func (*baseLiteral) Kind() ExprKind {
+ return LiteralKind
+}
+
+func (l *baseLiteral) renumberIDs(IDGenerator) {}
+
+func (*baseLiteral) isExpr() {}
+
+var _ ListExpr = &baseListExpr{}
+
+type baseListExpr struct {
+ elements []Expr
+ optIndices []int32
+ optIndexMap map[int32]struct{}
+}
+
+func (*baseListExpr) Kind() ExprKind {
+ return ListKind
+}
+
+func (e *baseListExpr) Elements() []Expr {
+ if e == nil {
+ return []Expr{}
+ }
+ return e.elements
+}
+
+func (e *baseListExpr) IsOptional(index int32) bool {
+ _, found := e.optIndexMap[index]
+ return found
+}
+
+func (e *baseListExpr) OptionalIndices() []int32 {
+ if e == nil {
+ return []int32{}
+ }
+ return e.optIndices
+}
+
+func (e *baseListExpr) Size() int {
+ return len(e.Elements())
+}
+
+func (e *baseListExpr) renumberIDs(idGen IDGenerator) {
+ for _, elem := range e.Elements() {
+ elem.RenumberIDs(idGen)
+ }
+}
+
+func (*baseListExpr) isExpr() {}
+
+type baseMapExpr struct {
+ entries []EntryExpr
+}
+
+func (*baseMapExpr) Kind() ExprKind {
+ return MapKind
+}
+
+func (e *baseMapExpr) Entries() []EntryExpr {
+ if e == nil {
+ return []EntryExpr{}
+ }
+ return e.entries
+}
+
+func (e *baseMapExpr) Size() int {
+ return len(e.Entries())
+}
+
+func (e *baseMapExpr) renumberIDs(idGen IDGenerator) {
+ for _, entry := range e.Entries() {
+ entry.RenumberIDs(idGen)
+ }
+}
+
+func (*baseMapExpr) isExpr() {}
+
+type baseSelectExpr struct {
+ operand Expr
+ field string
+ testOnly bool
+}
+
+func (*baseSelectExpr) Kind() ExprKind {
+ return SelectKind
+}
+
+func (e *baseSelectExpr) Operand() Expr {
+ if e == nil || e.operand == nil {
+ return nilExpr
+ }
+ return e.operand
+}
+
+func (e *baseSelectExpr) FieldName() string {
+ if e == nil {
+ return ""
+ }
+ return e.field
+}
+
+func (e *baseSelectExpr) IsTestOnly() bool {
+ if e == nil {
+ return false
+ }
+ return e.testOnly
+}
+
+func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) {
+ e.Operand().RenumberIDs(idGen)
+}
+
+func (*baseSelectExpr) isExpr() {}
+
+type baseStructExpr struct {
+ typeName string
+ fields []EntryExpr
+}
+
+func (*baseStructExpr) Kind() ExprKind {
+ return StructKind
+}
+
+func (e *baseStructExpr) TypeName() string {
+ if e == nil {
+ return ""
+ }
+ return e.typeName
+}
+
+func (e *baseStructExpr) Fields() []EntryExpr {
+ if e == nil {
+ return []EntryExpr{}
+ }
+ return e.fields
+}
+
+func (e *baseStructExpr) renumberIDs(idGen IDGenerator) {
+ for _, f := range e.Fields() {
+ f.RenumberIDs(idGen)
+ }
+}
+
+func (*baseStructExpr) isExpr() {}
+
+type entryExprKindCase interface {
+ Kind() EntryExprKind
+
+ renumberIDs(IDGenerator)
+
+ isEntryExpr()
+}
+
+var _ EntryExpr = &entryExpr{}
+
+type entryExpr struct {
+ id int64
+ entryExprKindCase
+}
+
+func (e *entryExpr) ID() int64 {
+ return e.id
+}
+
+func (e *entryExpr) AsMapEntry() MapEntry {
+ if e.Kind() != MapEntryKind {
+ return nilMapEntry
+ }
+ return e.entryExprKindCase.(MapEntry)
+}
+
+func (e *entryExpr) AsStructField() StructField {
+ if e.Kind() != StructFieldKind {
+ return nilStructField
+ }
+ return e.entryExprKindCase.(StructField)
+}
+
+func (e *entryExpr) RenumberIDs(idGen IDGenerator) {
+ e.id = idGen(e.id)
+ e.entryExprKindCase.renumberIDs(idGen)
+}
+
+type baseMapEntry struct {
+ key Expr
+ value Expr
+ isOptional bool
+}
+
+func (e *baseMapEntry) Kind() EntryExprKind {
+ return MapEntryKind
+}
+
+func (e *baseMapEntry) Key() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.key
+}
+
+func (e *baseMapEntry) Value() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.value
+}
+
+func (e *baseMapEntry) IsOptional() bool {
+ if e == nil {
+ return false
+ }
+ return e.isOptional
+}
+
+func (e *baseMapEntry) renumberIDs(idGen IDGenerator) {
+ e.Key().RenumberIDs(idGen)
+ e.Value().RenumberIDs(idGen)
+}
+
+func (*baseMapEntry) isEntryExpr() {}
+
+type baseStructField struct {
+ field string
+ value Expr
+ isOptional bool
+}
+
+func (f *baseStructField) Kind() EntryExprKind {
+ return StructFieldKind
+}
+
+func (f *baseStructField) Name() string {
+ if f == nil {
+ return ""
+ }
+ return f.field
+}
+
+func (f *baseStructField) Value() Expr {
+ if f == nil {
+ return nilExpr
+ }
+ return f.value
+}
+
+func (f *baseStructField) IsOptional() bool {
+ if f == nil {
+ return false
+ }
+ return f.isOptional
+}
+
+func (f *baseStructField) renumberIDs(idGen IDGenerator) {
+ f.Value().RenumberIDs(idGen)
+}
+
+func (*baseStructField) isEntryExpr() {}
+
+var (
+ nilExpr *expr = nil
+ nilCall *baseCallExpr = nil
+ nilCompre *baseComprehensionExpr = nil
+ nilList *baseListExpr = nil
+ nilMap *baseMapExpr = nil
+ nilMapEntry *baseMapEntry = nil
+ nilSel *baseSelectExpr = nil
+ nilStruct *baseStructExpr = nil
+ nilStructField *baseStructField = nil
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/factory.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/factory.go
new file mode 100644
index 0000000000..994806b793
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/factory.go
@@ -0,0 +1,313 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import "github.com/google/cel-go/common/types/ref"
+
+// ExprFactory defines a set of methods necessary for building native expression values.
+type ExprFactory interface {
+ // CopyExpr creates a deep copy of the input Expr value.
+ CopyExpr(Expr) Expr
+
+ // CopyEntryExpr creates a deep copy of the input EntryExpr value.
+ CopyEntryExpr(EntryExpr) EntryExpr
+
+ // NewCall creates an Expr value representing a global function call.
+ NewCall(id int64, function string, args ...Expr) Expr
+
+ // NewComprehension creates an Expr value representing a one-variable comprehension over a value range.
+ NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+ // NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range.
+ NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+ // NewMemberCall creates an Expr value representing a member function call.
+ NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr
+
+ // NewIdent creates an Expr value representing an identifier.
+ NewIdent(id int64, name string) Expr
+
+ // NewAccuIdent creates an Expr value representing an accumulator identifier within a
+	// comprehension.
+ NewAccuIdent(id int64) Expr
+
+ // NewLiteral creates an Expr value representing a literal value, such as a string or integer.
+ NewLiteral(id int64, value ref.Val) Expr
+
+ // NewList creates an Expr value representing a list literal expression with optional indices.
+ //
+	// Optional indices will typically be empty unless the CEL optional types are enabled.
+ NewList(id int64, elems []Expr, optIndices []int32) Expr
+
+	// NewMap creates an Expr value representing a map literal expression.
+ NewMap(id int64, entries []EntryExpr) Expr
+
+ // NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether
+ // the key is optionally set.
+ NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr
+
+ // NewPresenceTest creates an Expr representing a field presence test on an operand expression.
+ NewPresenceTest(id int64, operand Expr, field string) Expr
+
+ // NewSelect creates an Expr representing a field selection on an operand expression.
+ NewSelect(id int64, operand Expr, field string) Expr
+
+ // NewStruct creates an Expr value representing a struct literal with a given type name and a
+ // set of field initializers.
+ NewStruct(id int64, typeName string, fields []EntryExpr) Expr
+
+ // NewStructField creates a StructField with a given field name, value, and a flag indicating
+ // whether the field is optionally set.
+ NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr
+
+ // NewUnspecifiedExpr creates an empty expression node.
+ NewUnspecifiedExpr(id int64) Expr
+
+ isExprFactory()
+}
+
+type baseExprFactory struct{}
+
+// NewExprFactory creates an ExprFactory instance.
+func NewExprFactory() ExprFactory {
+ return &baseExprFactory{}
+}
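+
+// Illustrative usage (an editorial sketch, not part of the vendored file):
+// building the expression `size(x) > 0` by hand. The IDs are chosen manually
+// here; real callers typically let a parser or ID generator assign them, and
+// "_>_" is the internal CEL operator name for `>`.
+//
+//	fac := NewExprFactory()
+//	sizeCall := fac.NewCall(1, "size", fac.NewIdent(2, "x"))
+//	cmp := fac.NewCall(3, "_>_", sizeCall, fac.NewLiteral(4, types.Int(0)))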
+
+func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr {
+ if len(args) == 0 {
+ args = []Expr{}
+ }
+ return fac.newExpr(
+ id,
+ &baseCallExpr{
+ function: function,
+ target: nilExpr,
+ args: args,
+ isMember: false,
+ })
+}
+
+func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr {
+ if len(args) == 0 {
+ args = []Expr{}
+ }
+ return fac.newExpr(
+ id,
+ &baseCallExpr{
+ function: function,
+ target: target,
+ args: args,
+ isMember: true,
+ })
+}
+
+func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr {
+ // Set the iter_var2 to empty string to indicate the second variable is omitted
+ return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result)
+}
+
+func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr {
+ return fac.newExpr(
+ id,
+ &baseComprehensionExpr{
+ iterRange: iterRange,
+ iterVar: iterVar,
+ iterVar2: iterVar2,
+ accuVar: accuVar,
+ accuInit: accuInit,
+ loopCond: loopCond,
+ loopStep: loopStep,
+ result: result,
+ })
+}
+
+func (fac *baseExprFactory) NewIdent(id int64, name string) Expr {
+ return fac.newExpr(id, baseIdentExpr(name))
+}
+
+func (fac *baseExprFactory) NewAccuIdent(id int64) Expr {
+ return fac.NewIdent(id, "__result__")
+}
+
+func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr {
+ return fac.newExpr(id, &baseLiteral{Val: value})
+}
+
+func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr {
+ optIndexMap := make(map[int32]struct{}, len(optIndices))
+ for _, idx := range optIndices {
+ optIndexMap[idx] = struct{}{}
+ }
+ return fac.newExpr(id,
+ &baseListExpr{
+ elements: elems,
+ optIndices: optIndices,
+ optIndexMap: optIndexMap,
+ })
+}
+
+func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr {
+ return fac.newExpr(id, &baseMapExpr{entries: entries})
+}
+
+func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr {
+ return fac.newEntryExpr(
+ id,
+ &baseMapEntry{
+ key: key,
+ value: value,
+ isOptional: isOptional,
+ })
+}
+
+func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr {
+ return fac.newExpr(
+ id,
+ &baseSelectExpr{
+ operand: operand,
+ field: field,
+ testOnly: true,
+ })
+}
+
+func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr {
+ return fac.newExpr(
+ id,
+ &baseSelectExpr{
+ operand: operand,
+ field: field,
+ })
+}
+
+func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr {
+ return fac.newExpr(
+ id,
+ &baseStructExpr{
+ typeName: typeName,
+ fields: fields,
+ })
+}
+
+func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr {
+ return fac.newEntryExpr(
+ id,
+ &baseStructField{
+ field: field,
+ value: value,
+ isOptional: isOptional,
+ })
+}
+
+func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr {
+ return fac.newExpr(id, nil)
+}
+
+func (fac *baseExprFactory) CopyExpr(e Expr) Expr {
+ // unwrap navigable expressions to avoid unnecessary allocations during copying.
+ if nav, ok := e.(*navigableExprImpl); ok {
+ e = nav.Expr
+ }
+ switch e.Kind() {
+ case CallKind:
+ c := e.AsCall()
+ argsCopy := make([]Expr, len(c.Args()))
+ for i, arg := range c.Args() {
+ argsCopy[i] = fac.CopyExpr(arg)
+ }
+ if !c.IsMemberFunction() {
+ return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...)
+ }
+ return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...)
+ case ComprehensionKind:
+ compre := e.AsComprehension()
+ return fac.NewComprehensionTwoVar(e.ID(),
+ fac.CopyExpr(compre.IterRange()),
+ compre.IterVar(),
+ compre.IterVar2(),
+ compre.AccuVar(),
+ fac.CopyExpr(compre.AccuInit()),
+ fac.CopyExpr(compre.LoopCondition()),
+ fac.CopyExpr(compre.LoopStep()),
+ fac.CopyExpr(compre.Result()))
+ case IdentKind:
+ return fac.NewIdent(e.ID(), e.AsIdent())
+ case ListKind:
+ l := e.AsList()
+ elemsCopy := make([]Expr, l.Size())
+ for i, elem := range l.Elements() {
+ elemsCopy[i] = fac.CopyExpr(elem)
+ }
+ return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices())
+ case LiteralKind:
+ return fac.NewLiteral(e.ID(), e.AsLiteral())
+ case MapKind:
+ m := e.AsMap()
+ entriesCopy := make([]EntryExpr, m.Size())
+ for i, entry := range m.Entries() {
+ entriesCopy[i] = fac.CopyEntryExpr(entry)
+ }
+ return fac.NewMap(e.ID(), entriesCopy)
+ case SelectKind:
+ s := e.AsSelect()
+ if s.IsTestOnly() {
+ return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
+ }
+ return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
+ case StructKind:
+ s := e.AsStruct()
+ fieldsCopy := make([]EntryExpr, len(s.Fields()))
+ for i, field := range s.Fields() {
+ fieldsCopy[i] = fac.CopyEntryExpr(field)
+ }
+ return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy)
+ default:
+ return fac.NewUnspecifiedExpr(e.ID())
+ }
+}
+
+func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr {
+ switch e.Kind() {
+ case MapEntryKind:
+ entry := e.AsMapEntry()
+ return fac.NewMapEntry(e.ID(),
+ fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional())
+ case StructFieldKind:
+ field := e.AsStructField()
+ return fac.NewStructField(e.ID(),
+ field.Name(), fac.CopyExpr(field.Value()), field.IsOptional())
+ default:
+ return fac.newEntryExpr(e.ID(), nil)
+ }
+}
+
+func (*baseExprFactory) isExprFactory() {}
+
+func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr {
+ return &expr{
+ id: id,
+ exprKindCase: e,
+ }
+}
+
+func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr {
+ return &entryExpr{
+ id: id,
+ entryExprKindCase: e,
+ }
+}
+
+var (
+ defaultFactory = &baseExprFactory{}
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/navigable.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/navigable.go
new file mode 100644
index 0000000000..d7a90fb7c3
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/navigable.go
@@ -0,0 +1,660 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// NavigableExpr represents the base navigable expression value with methods to inspect the
+// parent and child expressions.
+type NavigableExpr interface {
+ Expr
+
+ // Type of the expression.
+ //
+ // If the expression is type-checked, the type check metadata is returned. If the expression
+ // has not been type-checked, the types.DynType value is returned.
+ Type() *types.Type
+
+ // Parent returns the parent expression node, if one exists.
+ Parent() (NavigableExpr, bool)
+
+ // Children returns a list of child expression nodes.
+ Children() []NavigableExpr
+
+ // Depth indicates the depth in the expression tree.
+ //
+ // The root expression has depth 0.
+ Depth() int
+}
+
+// NavigateAST converts an AST to a NavigableExpr.
+func NavigateAST(ast *AST) NavigableExpr {
+ return NavigateExpr(ast, ast.Expr())
+}
+
+// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST.
+//
+// If the expression is already a NavigableExpr, the parent and depth information will be
+// propagated on the new NavigableExpr value; otherwise, the expr value will be treated
+// as though it is the root of the expression graph with a depth of 0.
+func NavigateExpr(ast *AST, expr Expr) NavigableExpr {
+ depth := 0
+ var parent NavigableExpr = nil
+ if nav, ok := expr.(NavigableExpr); ok {
+ depth = nav.Depth()
+ parent, _ = nav.Parent()
+ }
+ return newNavigableExpr(ast, parent, expr, depth)
+}
+
+// ExprMatcher takes a NavigableExpr and indicates whether the value is a match.
+//
+// This function type should be used with the `MatchDescendants` and `MatchSubset` calls.
+type ExprMatcher func(NavigableExpr) bool
+
+// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
+// is composed entirely of constant values, such as a simple literal or a list or map literal.
+func ConstantValueMatcher() ExprMatcher {
+ return matchIsConstantValue
+}
+
+// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
+// the specified `kind`.
+func KindMatcher(kind ExprKind) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ return e.Kind() == kind
+ }
+}
+
+// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
+// function name is equal to `funcName`.
+func FunctionMatcher(funcName string) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ if e.Kind() != CallKind {
+ return false
+ }
+ return e.AsCall().FunctionName() == funcName
+ }
+}
+
+// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
+//
+// Such a result would work well with subsequent MatchList calls.
+func AllMatcher() ExprMatcher {
+ return func(NavigableExpr) bool {
+ return true
+ }
+}
+
+// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values
+// matching the input criteria in post-order (bottom up).
+func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ matches := []NavigableExpr{}
+ navVisitor := &baseVisitor{
+ visitExpr: func(e Expr) {
+ nav := e.(NavigableExpr)
+ if matcher(nav) {
+ matches = append(matches, nav)
+ }
+ },
+ }
+ visit(expr, navVisitor, postOrder, 0, 0)
+ return matches
+}
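+
+// Illustrative usage (assumption): collecting every `size` call in a previously
+// parsed or checked AST value named `tree`:
+//
+//	root := NavigateAST(tree)
+//	sizeCalls := MatchDescendants(root, FunctionMatcher("size"))
+//	for _, call := range sizeCalls {
+//		// call.ID() and call.Type() are available on each match.
+//	}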
+
+// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
+// subset of NavigableExpr values which match.
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ matches := []NavigableExpr{}
+ navVisitor := &baseVisitor{
+ visitExpr: func(e Expr) {
+ nav := e.(NavigableExpr)
+ if matcher(nav) {
+ matches = append(matches, nav)
+ }
+ },
+ }
+ for _, expr := range exprs {
+ visit(expr, navVisitor, postOrder, 0, 1)
+ }
+ return matches
+}
+
+// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph.
+type Visitor interface {
+ // VisitExpr visits the input expression.
+ VisitExpr(Expr)
+
+ // VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry.
+ VisitEntryExpr(EntryExpr)
+}
+
+type baseVisitor struct {
+ visitExpr func(Expr)
+ visitEntryExpr func(EntryExpr)
+}
+
+// VisitExpr visits the Expr if the internal expr visitor has been configured.
+func (v *baseVisitor) VisitExpr(e Expr) {
+ if v.visitExpr != nil {
+ v.visitExpr(e)
+ }
+}
+
+// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured.
+func (v *baseVisitor) VisitEntryExpr(e EntryExpr) {
+ if v.visitEntryExpr != nil {
+ v.visitEntryExpr(e)
+ }
+}
+
+// NewExprVisitor creates a visitor which only visits expression nodes.
+func NewExprVisitor(v func(Expr)) Visitor {
+ return &baseVisitor{
+ visitExpr: v,
+ visitEntryExpr: nil,
+ }
+}
+
+// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up).
+func PostOrderVisit(expr Expr, visitor Visitor) {
+ visit(expr, visitor, postOrder, 0, 0)
+}
+
+// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down).
+func PreOrderVisit(expr Expr, visitor Visitor) {
+ visit(expr, visitor, preOrder, 0, 0)
+}
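+
+// Illustrative usage (assumption): counting the expression nodes in a graph
+// with a post-order walk:
+//
+//	count := 0
+//	PostOrderVisit(e, NewExprVisitor(func(Expr) { count++ }))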
+
+type visitOrder int
+
+const (
+ preOrder = iota + 1
+ postOrder
+)
+
+// TODO: consider exposing a way to configure a limit for the max visit depth.
+// It's possible that we may want to configure this on the NewExprVisitor()
+// and through MatchDescendants() / MaxID().
+func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) {
+ if maxDepth > 0 && depth == maxDepth {
+ return
+ }
+ if order == preOrder {
+ visitor.VisitExpr(expr)
+ }
+ switch expr.Kind() {
+ case CallKind:
+ c := expr.AsCall()
+ if c.IsMemberFunction() {
+ visit(c.Target(), visitor, order, depth+1, maxDepth)
+ }
+ for _, arg := range c.Args() {
+ visit(arg, visitor, order, depth+1, maxDepth)
+ }
+ case ComprehensionKind:
+ c := expr.AsComprehension()
+ visit(c.IterRange(), visitor, order, depth+1, maxDepth)
+ visit(c.AccuInit(), visitor, order, depth+1, maxDepth)
+ visit(c.LoopCondition(), visitor, order, depth+1, maxDepth)
+ visit(c.LoopStep(), visitor, order, depth+1, maxDepth)
+ visit(c.Result(), visitor, order, depth+1, maxDepth)
+ case ListKind:
+ l := expr.AsList()
+ for _, elem := range l.Elements() {
+ visit(elem, visitor, order, depth+1, maxDepth)
+ }
+ case MapKind:
+ m := expr.AsMap()
+ for _, e := range m.Entries() {
+ if order == preOrder {
+ visitor.VisitEntryExpr(e)
+ }
+ entry := e.AsMapEntry()
+ visit(entry.Key(), visitor, order, depth+1, maxDepth)
+ visit(entry.Value(), visitor, order, depth+1, maxDepth)
+ if order == postOrder {
+ visitor.VisitEntryExpr(e)
+ }
+ }
+ case SelectKind:
+ visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth)
+ case StructKind:
+ s := expr.AsStruct()
+ for _, f := range s.Fields() {
+ visitor.VisitEntryExpr(f)
+ visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth)
+ }
+ }
+ if order == postOrder {
+ visitor.VisitExpr(expr)
+ }
+}
+
+func matchIsConstantValue(e NavigableExpr) bool {
+ if e.Kind() == LiteralKind {
+ return true
+ }
+ if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
+ for _, child := range e.Children() {
+ if !matchIsConstantValue(child) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr {
+ // Reduce navigable expression nesting by unwrapping the embedded Expr value.
+ if nav, ok := expr.(*navigableExprImpl); ok {
+ expr = nav.Expr
+ }
+ nav := &navigableExprImpl{
+ Expr: expr,
+ depth: depth,
+ ast: ast,
+ parent: parent,
+ createChildren: getChildFactory(expr),
+ }
+ return nav
+}
+
+type navigableExprImpl struct {
+ Expr
+ depth int
+ ast *AST
+ parent NavigableExpr
+ createChildren childFactory
+}
+
+func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
+ if nav.parent != nil {
+ return nav.parent, true
+ }
+ return nil, false
+}
+
+func (nav *navigableExprImpl) ID() int64 {
+ return nav.Expr.ID()
+}
+
+func (nav *navigableExprImpl) Kind() ExprKind {
+ return nav.Expr.Kind()
+}
+
+func (nav *navigableExprImpl) Type() *types.Type {
+ return nav.ast.GetType(nav.ID())
+}
+
+func (nav *navigableExprImpl) Children() []NavigableExpr {
+ return nav.createChildren(nav)
+}
+
+func (nav *navigableExprImpl) Depth() int {
+ return nav.depth
+}
+
+func (nav *navigableExprImpl) AsCall() CallExpr {
+ return navigableCallImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr {
+ return navigableComprehensionImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsIdent() string {
+ return nav.Expr.AsIdent()
+}
+
+func (nav *navigableExprImpl) AsList() ListExpr {
+ return navigableListImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsLiteral() ref.Val {
+ return nav.Expr.AsLiteral()
+}
+
+func (nav *navigableExprImpl) AsMap() MapExpr {
+ return navigableMapImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsSelect() SelectExpr {
+ return navigableSelectImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsStruct() StructExpr {
+ return navigableStructImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr {
+ return newNavigableExpr(nav.ast, nav, e, nav.depth+1)
+}
+
+func (nav *navigableExprImpl) isExpr() {}
+
+type navigableCallImpl struct {
+ *navigableExprImpl
+}
+
+func (call navigableCallImpl) FunctionName() string {
+ return call.Expr.AsCall().FunctionName()
+}
+
+func (call navigableCallImpl) IsMemberFunction() bool {
+ return call.Expr.AsCall().IsMemberFunction()
+}
+
+func (call navigableCallImpl) Target() Expr {
+ t := call.Expr.AsCall().Target()
+ if t != nil {
+ return call.createChild(t)
+ }
+ return nil
+}
+
+func (call navigableCallImpl) Args() []Expr {
+ args := call.Expr.AsCall().Args()
+ navArgs := make([]Expr, len(args))
+ for i, a := range args {
+ navArgs[i] = call.createChild(a)
+ }
+ return navArgs
+}
+
+type navigableComprehensionImpl struct {
+ *navigableExprImpl
+}
+
+func (comp navigableComprehensionImpl) IterRange() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().IterRange())
+}
+
+func (comp navigableComprehensionImpl) IterVar() string {
+ return comp.Expr.AsComprehension().IterVar()
+}
+
+func (comp navigableComprehensionImpl) IterVar2() string {
+ return comp.Expr.AsComprehension().IterVar2()
+}
+
+func (comp navigableComprehensionImpl) HasIterVar2() bool {
+ return comp.Expr.AsComprehension().HasIterVar2()
+}
+
+func (comp navigableComprehensionImpl) AccuVar() string {
+ return comp.Expr.AsComprehension().AccuVar()
+}
+
+func (comp navigableComprehensionImpl) AccuInit() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().AccuInit())
+}
+
+func (comp navigableComprehensionImpl) LoopCondition() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().LoopCondition())
+}
+
+func (comp navigableComprehensionImpl) LoopStep() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().LoopStep())
+}
+
+func (comp navigableComprehensionImpl) Result() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().Result())
+}
+
+type navigableListImpl struct {
+ *navigableExprImpl
+}
+
+func (l navigableListImpl) Elements() []Expr {
+ pbElems := l.Expr.AsList().Elements()
+ elems := make([]Expr, len(pbElems))
+ for i := 0; i < len(pbElems); i++ {
+ elems[i] = l.createChild(pbElems[i])
+ }
+ return elems
+}
+
+func (l navigableListImpl) IsOptional(index int32) bool {
+ return l.Expr.AsList().IsOptional(index)
+}
+
+func (l navigableListImpl) OptionalIndices() []int32 {
+ return l.Expr.AsList().OptionalIndices()
+}
+
+func (l navigableListImpl) Size() int {
+ return l.Expr.AsList().Size()
+}
+
+type navigableMapImpl struct {
+ *navigableExprImpl
+}
+
+func (m navigableMapImpl) Entries() []EntryExpr {
+ mapExpr := m.Expr.AsMap()
+ entries := make([]EntryExpr, len(mapExpr.Entries()))
+ for i, e := range mapExpr.Entries() {
+ entry := e.AsMapEntry()
+ entries[i] = &entryExpr{
+ id: e.ID(),
+ entryExprKindCase: navigableEntryImpl{
+ key: m.createChild(entry.Key()),
+ val: m.createChild(entry.Value()),
+ isOpt: entry.IsOptional(),
+ },
+ }
+ }
+ return entries
+}
+
+func (m navigableMapImpl) Size() int {
+ return m.Expr.AsMap().Size()
+}
+
+type navigableEntryImpl struct {
+ key NavigableExpr
+ val NavigableExpr
+ isOpt bool
+}
+
+func (e navigableEntryImpl) Kind() EntryExprKind {
+ return MapEntryKind
+}
+
+func (e navigableEntryImpl) Key() Expr {
+ return e.key
+}
+
+func (e navigableEntryImpl) Value() Expr {
+ return e.val
+}
+
+func (e navigableEntryImpl) IsOptional() bool {
+ return e.isOpt
+}
+
+func (e navigableEntryImpl) renumberIDs(IDGenerator) {}
+
+func (e navigableEntryImpl) isEntryExpr() {}
+
+type navigableSelectImpl struct {
+ *navigableExprImpl
+}
+
+func (sel navigableSelectImpl) FieldName() string {
+ return sel.Expr.AsSelect().FieldName()
+}
+
+func (sel navigableSelectImpl) IsTestOnly() bool {
+ return sel.Expr.AsSelect().IsTestOnly()
+}
+
+func (sel navigableSelectImpl) Operand() Expr {
+ return sel.createChild(sel.Expr.AsSelect().Operand())
+}
+
+type navigableStructImpl struct {
+ *navigableExprImpl
+}
+
+func (s navigableStructImpl) TypeName() string {
+ return s.Expr.AsStruct().TypeName()
+}
+
+func (s navigableStructImpl) Fields() []EntryExpr {
+ fieldInits := s.Expr.AsStruct().Fields()
+ fields := make([]EntryExpr, len(fieldInits))
+ for i, f := range fieldInits {
+ field := f.AsStructField()
+ fields[i] = &entryExpr{
+ id: f.ID(),
+ entryExprKindCase: navigableFieldImpl{
+ name: field.Name(),
+ val: s.createChild(field.Value()),
+ isOpt: field.IsOptional(),
+ },
+ }
+ }
+ return fields
+}
+
+type navigableFieldImpl struct {
+ name string
+ val NavigableExpr
+ isOpt bool
+}
+
+func (f navigableFieldImpl) Kind() EntryExprKind {
+ return StructFieldKind
+}
+
+func (f navigableFieldImpl) Name() string {
+ return f.name
+}
+
+func (f navigableFieldImpl) Value() Expr {
+ return f.val
+}
+
+func (f navigableFieldImpl) IsOptional() bool {
+ return f.isOpt
+}
+
+func (f navigableFieldImpl) renumberIDs(IDGenerator) {}
+
+func (f navigableFieldImpl) isEntryExpr() {}
+
+func getChildFactory(expr Expr) childFactory {
+ if expr == nil {
+ return noopFactory
+ }
+ switch expr.Kind() {
+ case LiteralKind:
+ return noopFactory
+ case IdentKind:
+ return noopFactory
+ case SelectKind:
+ return selectFactory
+ case CallKind:
+ return callArgFactory
+ case ListKind:
+ return listElemFactory
+ case MapKind:
+ return mapEntryFactory
+ case StructKind:
+ return structEntryFactory
+ case ComprehensionKind:
+ return comprehensionFactory
+ default:
+ return noopFactory
+ }
+}
+
+type childFactory func(*navigableExprImpl) []NavigableExpr
+
+func noopFactory(*navigableExprImpl) []NavigableExpr {
+ return nil
+}
+
+func selectFactory(nav *navigableExprImpl) []NavigableExpr {
+ return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())}
+}
+
+func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
+ call := nav.Expr.AsCall()
+ argCount := len(call.Args())
+ if call.IsMemberFunction() {
+ argCount++
+ }
+ navExprs := make([]NavigableExpr, argCount)
+ i := 0
+ if call.IsMemberFunction() {
+ navExprs[i] = nav.createChild(call.Target())
+ i++
+ }
+ for _, arg := range call.Args() {
+ navExprs[i] = nav.createChild(arg)
+ i++
+ }
+ return navExprs
+}
+
+func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
+ l := nav.Expr.AsList()
+ navExprs := make([]NavigableExpr, len(l.Elements()))
+ for i, e := range l.Elements() {
+ navExprs[i] = nav.createChild(e)
+ }
+ return navExprs
+}
+
+func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ s := nav.Expr.AsStruct()
+ entries := make([]NavigableExpr, len(s.Fields()))
+ for i, e := range s.Fields() {
+ f := e.AsStructField()
+ entries[i] = nav.createChild(f.Value())
+ }
+ return entries
+}
+
+func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ m := nav.Expr.AsMap()
+ entries := make([]NavigableExpr, len(m.Entries())*2)
+ j := 0
+ for _, e := range m.Entries() {
+ mapEntry := e.AsMapEntry()
+ entries[j] = nav.createChild(mapEntry.Key())
+ entries[j+1] = nav.createChild(mapEntry.Value())
+ j += 2
+ }
+ return entries
+}
+
+func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
+ compre := nav.Expr.AsComprehension()
+ return []NavigableExpr{
+ nav.createChild(compre.IterRange()),
+ nav.createChild(compre.AccuInit()),
+ nav.createChild(compre.LoopCondition()),
+ nav.createChild(compre.LoopStep()),
+ nav.createChild(compre.Result()),
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
new file mode 100644
index 0000000000..81197f0641
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "container.go",
+ ],
+ importpath = "github.com/google/cel-go/common/containers",
+ deps = [
+ "//common/ast:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "container_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/ast:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/containers/container.go b/hack/tools/vendor/github.com/google/cel-go/common/containers/container.go
new file mode 100644
index 0000000000..3097a3f785
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/containers/container.go
@@ -0,0 +1,328 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package containers defines types and functions for resolving qualified names within a namespace
+// or type provided to CEL.
+package containers
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/ast"
+)
+
+var (
+ // DefaultContainer has an empty container name.
+ DefaultContainer *Container = nil
+
+ // Empty map to search for aliases when needed.
+ noAliases = make(map[string]string)
+)
+
+// NewContainer creates a new Container configured by the provided ContainerOption values.
+func NewContainer(opts ...ContainerOption) (*Container, error) {
+ var c *Container
+ var err error
+ for _, opt := range opts {
+ c, err = opt(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
+}
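+
+// Illustrative usage (assumption): a container rooted at `a.b.c` with an
+// abbreviation for a longer qualified type name:
+//
+//	cont, err := NewContainer(
+//		Name("a.b.c"),
+//		Abbrevs("my.long.pkg.TypeName"),
+//	)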
+
+// Container holds a reference to an optional qualified container name and set of aliases.
+//
+// The program container can be used to simplify variable, function, and type specification within
+// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more
+// details.
+type Container struct {
+ name string
+ aliases map[string]string
+}
+
+// Extend creates a new Container with the existing settings and applies a series of
+// ContainerOptions to further configure the new container.
+func (c *Container) Extend(opts ...ContainerOption) (*Container, error) {
+ if c == nil {
+ return NewContainer(opts...)
+ }
+ // Copy the name and aliases of the existing container.
+ ext := &Container{name: c.Name()}
+ if len(c.aliasSet()) > 0 {
+ aliasSet := make(map[string]string, len(c.aliasSet()))
+ for k, v := range c.aliasSet() {
+ aliasSet[k] = v
+ }
+ ext.aliases = aliasSet
+ }
+ // Apply the new options to the container.
+ var err error
+ for _, opt := range opts {
+ ext, err = opt(ext)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ext, nil
+}
+
+// Name returns the fully-qualified name of the container.
+//
+// The name may conceptually be a namespace, package, or type.
+func (c *Container) Name() string {
+ if c == nil {
+ return ""
+ }
+ return c.name
+}
+
+// ResolveCandidateNames returns the candidate names of a namespaced identifier in C++ resolution
+// order.
+//
+// Names which shadow other names are returned first. If a name includes a leading dot ('.'),
+// the name is treated as an absolute identifier which cannot be shadowed.
+//
+// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order:
+//
+// a.b.c.M.N.R.s
+// a.b.c.M.R.s
+// a.b.c.R.s
+// a.b.R.s
+// a.R.s
+// R.s
+//
+// If aliases or abbreviations are configured for the container, then alias names will take
+// precedence over containerized names.
+func (c *Container) ResolveCandidateNames(name string) []string {
+ if strings.HasPrefix(name, ".") {
+ qn := name[1:]
+ alias, isAlias := c.findAlias(qn)
+ if isAlias {
+ return []string{alias}
+ }
+ return []string{qn}
+ }
+ alias, isAlias := c.findAlias(name)
+ if isAlias {
+ return []string{alias}
+ }
+ if c.Name() == "" {
+ return []string{name}
+ }
+ nextCont := c.Name()
+ candidates := []string{nextCont + "." + name}
+ for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") {
+ nextCont = nextCont[:i]
+ candidates = append(candidates, nextCont+"."+name)
+ }
+ return append(candidates, name)
+}
+
+// aliasSet returns the alias to fully-qualified name mapping stored in the container.
+func (c *Container) aliasSet() map[string]string {
+ if c == nil || c.aliases == nil {
+ return noAliases
+ }
+ return c.aliases
+}
+
+// findAlias takes a name as input and returns an alias expansion if one exists.
+//
+// If the name is qualified, the first component of the qualified name is checked against known
+// aliases. Any alias that is found in a qualified name is expanded in the result:
+//
+// alias: R -> my.alias.R
+// name: R.S.T
+// output: my.alias.R.S.T
+//
+// Note, the name must not have a leading dot.
+func (c *Container) findAlias(name string) (string, bool) {
+ // If an alias exists for the name, ensure it is searched last.
+ simple := name
+ qualifier := ""
+ dot := strings.Index(name, ".")
+ if dot >= 0 {
+ simple = name[0:dot]
+ qualifier = name[dot:]
+ }
+ alias, found := c.aliasSet()[simple]
+ if !found {
+ return "", false
+ }
+ return alias + qualifier, true
+}
+
+// ContainerOption specifies a functional configuration option for a Container.
+//
+// Note, ContainerOption implementations must be able to handle nil container inputs.
+type ContainerOption func(*Container) (*Container, error)
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+// to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+// qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ for _, qn := range qualifiedNames {
+ qn = strings.TrimSpace(qn)
+ for _, r := range qn {
+ if !isIdentifierChar(r) {
+ return nil, fmt.Errorf(
+ "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
+ }
+ }
+ ind := strings.LastIndex(qn, ".")
+ if ind <= 0 || ind >= len(qn)-1 {
+ return nil, fmt.Errorf(
+ "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
+ }
+ alias := qn[ind+1:]
+ var err error
+ c, err = aliasAs("abbreviation", qn, alias)(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
+ }
+}
+
+// Alias associates a fully-qualified name with a user-defined alias.
+//
+// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option
+// are more easily traced back to source code. The Alias option is useful for propagating alias
+// configuration from one Container instance to another, and may also be useful for remapping
+// poorly chosen protobuf message / package names.
+//
+// Note: all of the rules that apply to Abbrevs also apply to Alias.
+func Alias(qualifiedName, alias string) ContainerOption {
+ return aliasAs("alias", qualifiedName, alias)
+}
+
+func aliasAs(kind, qualifiedName, alias string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ if len(alias) == 0 || strings.Contains(alias, ".") {
+ return nil, fmt.Errorf(
+ "%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias)
+ }
+
+ if qualifiedName[0:1] == "." {
+ return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s",
+ qualifiedName)
+ }
+ ind := strings.LastIndex(qualifiedName, ".")
+ if ind <= 0 || ind == len(qualifiedName)-1 {
+ return nil, fmt.Errorf("%s must refer to a valid qualified name: %s",
+ kind, qualifiedName)
+ }
+ aliasRef, found := c.aliasSet()[alias]
+ if found {
+ return nil, fmt.Errorf(
+ "%s collides with existing reference: name=%s, %s=%s, existing=%s",
+ kind, qualifiedName, kind, alias, aliasRef)
+ }
+ if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias {
+ return nil, fmt.Errorf(
+ "%s collides with container name: name=%s, %s=%s, container=%s",
+ kind, qualifiedName, kind, alias, c.Name())
+ }
+ if c == nil {
+ c = &Container{}
+ }
+ if c.aliases == nil {
+ c.aliases = make(map[string]string)
+ }
+ c.aliases[alias] = qualifiedName
+ return c, nil
+ }
+}
+
+func isIdentifierChar(r rune) bool {
+ return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r))
+}
+
+// Name sets the fully-qualified name of the Container.
+func Name(name string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ if len(name) > 0 && name[0:1] == "." {
+ return nil, fmt.Errorf("container name must not contain a leading '.': %s", name)
+ }
+ if c.Name() == name {
+ return c, nil
+ }
+ if c == nil {
+ return &Container{name: name}, nil
+ }
+ c.name = name
+ return c, nil
+ }
+}
+
+// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean
+// 'found' value that indicates if the conversion is successful.
+func ToQualifiedName(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.IdentKind:
+ id := e.AsIdent()
+ return id, true
+ case ast.SelectKind:
+ sel := e.AsSelect()
+ // Test only expressions are not valid as qualified names.
+ if sel.IsTestOnly() {
+ return "", false
+ }
+ if qual, found := ToQualifiedName(sel.Operand()); found {
+ return qual + "." + sel.FieldName(), true
+ }
+ }
+ return "", false
+}
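+
+// Illustrative note (assumption): for the expression `a.b.c`, ToQualifiedName
+// returns ("a.b.c", true); for a computed operand such as `f().b`, it returns
+// ("", false) since the operand is not a simple identifier chain.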
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/cost.go b/hack/tools/vendor/github.com/google/cel-go/common/cost.go
new file mode 100644
index 0000000000..5e24bd0f47
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/cost.go
@@ -0,0 +1,40 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+const (
+ // SelectAndIdentCost is the cost of an operation that accesses an identifier or performs a select.
+ SelectAndIdentCost = 1
+
+ // ConstCost is the cost of an operation that accesses a constant.
+ ConstCost = 0
+
+ // ListCreateBaseCost is the base cost of any operation that creates a new list.
+ ListCreateBaseCost = 10
+
+ // MapCreateBaseCost is the base cost of any operation that creates a new map.
+ MapCreateBaseCost = 30
+
+ // StructCreateBaseCost is the base cost of any operation that creates a new struct.
+ StructCreateBaseCost = 40
+
+	// StringTraversalCostFactor is multiplied by the length of a string when computing the cost of traversing the entire
+ // string once.
+ StringTraversalCostFactor = 0.1
+
+	// RegexStringLengthCostFactor is multiplied by the length of a regex string pattern when computing the cost of
+ // applying the regex to a string of unit cost.
+ RegexStringLengthCostFactor = 0.25
+)
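+
+// Illustrative note (assumption): with these factors, fully traversing a
+// 20-character string costs 20 * StringTraversalCostFactor = 2, and applying a
+// regex with a pattern length of 8 to a unit-cost string contributes
+// 8 * RegexStringLengthCostFactor = 2 to the estimate.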
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
new file mode 100644
index 0000000000..724ed34045
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "debug.go",
+ ],
+ importpath = "github.com/google/cel-go/common/debug",
+ deps = [
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/debug/debug.go b/hack/tools/vendor/github.com/google/cel-go/common/debug/debug.go
new file mode 100644
index 0000000000..25d2e3d71c
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -0,0 +1,314 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debug provides tools to print a parsed expression graph and
+// adorn each expression element with additional metadata.
+package debug
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Adorner returns debug metadata that will be tacked on to the string
+// representation of an expression.
+type Adorner interface {
+ // GetMetadata for the input context.
+ GetMetadata(ctx any) string
+}
+
+// Writer manages writing expressions to an internal string.
+type Writer interface {
+ fmt.Stringer
+
+ // Buffer pushes an expression into an internal queue of expressions to
+ // write to a string.
+ Buffer(e ast.Expr)
+}
+
+type emptyDebugAdorner struct {
+}
+
+var emptyAdorner Adorner = &emptyDebugAdorner{}
+
+func (a *emptyDebugAdorner) GetMetadata(e any) string {
+ return ""
+}
+
+// ToDebugString gives the unadorned string representation of the Expr.
+func ToDebugString(e ast.Expr) string {
+ return ToAdornedDebugString(e, emptyAdorner)
+}
+
+// ToAdornedDebugString gives the adorned string representation of the Expr.
+func ToAdornedDebugString(e ast.Expr, adorner Adorner) string {
+ w := newDebugWriter(adorner)
+ w.Buffer(e)
+ return w.String()
+}
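+
+// Illustrative usage (an editorial sketch, not part of the vendored file): an
+// Adorner that appends expression IDs to each node, invoked from a client
+// package; the "^#id" adornment format is an arbitrary choice:
+//
+//	type idAdorner struct{}
+//
+//	func (idAdorner) GetMetadata(ctx any) string {
+//		if e, ok := ctx.(ast.Expr); ok {
+//			return fmt.Sprintf("^#%d", e.ID())
+//		}
+//		return ""
+//	}
+//
+//	out := debug.ToAdornedDebugString(parsedExpr, idAdorner{})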
+
+// debugWriter is used to print out pretty-printed debug strings.
+type debugWriter struct {
+ adorner Adorner
+ buffer bytes.Buffer
+ indent int
+ lineStart bool
+}
+
+func newDebugWriter(a Adorner) *debugWriter {
+ return &debugWriter{
+ adorner: a,
+ indent: 0,
+ lineStart: true,
+ }
+}
+
+func (w *debugWriter) Buffer(e ast.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.Kind() {
+ case ast.LiteralKind:
+ w.append(formatLiteral(e.AsLiteral()))
+ case ast.IdentKind:
+ w.append(e.AsIdent())
+ case ast.SelectKind:
+ w.appendSelect(e.AsSelect())
+ case ast.CallKind:
+ w.appendCall(e.AsCall())
+ case ast.ListKind:
+ w.appendList(e.AsList())
+ case ast.MapKind:
+ w.appendMap(e.AsMap())
+ case ast.StructKind:
+ w.appendStruct(e.AsStruct())
+ case ast.ComprehensionKind:
+ w.appendComprehension(e.AsComprehension())
+ }
+ w.adorn(e)
+}
+
+func (w *debugWriter) appendSelect(sel ast.SelectExpr) {
+ w.Buffer(sel.Operand())
+ w.append(".")
+ w.append(sel.FieldName())
+ if sel.IsTestOnly() {
+ w.append("~test-only~")
+ }
+}
+
+func (w *debugWriter) appendCall(call ast.CallExpr) {
+ if call.IsMemberFunction() {
+ w.Buffer(call.Target())
+ w.append(".")
+ }
+ w.append(call.FunctionName())
+ w.append("(")
+ if len(call.Args()) > 0 {
+ w.addIndent()
+ w.appendLine()
+ for i, arg := range call.Args() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(arg)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append(")")
+}
+
+func (w *debugWriter) appendList(list ast.ListExpr) {
+ w.append("[")
+ if len(list.Elements()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, elem := range list.Elements() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(elem)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("]")
+}
+
+func (w *debugWriter) appendStruct(obj ast.StructExpr) {
+ w.append(obj.TypeName())
+ w.append("{")
+ if len(obj.Fields()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, f := range obj.Fields() {
+ field := f.AsStructField()
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ if field.IsOptional() {
+ w.append("?")
+ }
+ w.append(field.Name())
+ w.append(":")
+ w.Buffer(field.Value())
+ w.adorn(f)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+func (w *debugWriter) appendMap(m ast.MapExpr) {
+ w.append("{")
+ if m.Size() > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, e := range m.Entries() {
+ entry := e.AsMapEntry()
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ if entry.IsOptional() {
+ w.append("?")
+ }
+ w.Buffer(entry.Key())
+ w.append(":")
+ w.Buffer(entry.Value())
+ w.adorn(e)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) {
+ w.append("__comprehension__(")
+ w.addIndent()
+ w.appendLine()
+ w.append("// Variable")
+ w.appendLine()
+ w.append(comprehension.IterVar())
+ w.append(",")
+ w.appendLine()
+ if comprehension.HasIterVar2() {
+ w.append(comprehension.IterVar2())
+ w.append(",")
+ w.appendLine()
+ }
+ w.append("// Target")
+ w.appendLine()
+ w.Buffer(comprehension.IterRange())
+ w.append(",")
+ w.appendLine()
+ w.append("// Accumulator")
+ w.appendLine()
+ w.append(comprehension.AccuVar())
+ w.append(",")
+ w.appendLine()
+ w.append("// Init")
+ w.appendLine()
+ w.Buffer(comprehension.AccuInit())
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopCondition")
+ w.appendLine()
+ w.Buffer(comprehension.LoopCondition())
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopStep")
+ w.appendLine()
+ w.Buffer(comprehension.LoopStep())
+ w.append(",")
+ w.appendLine()
+ w.append("// Result")
+ w.appendLine()
+ w.Buffer(comprehension.Result())
+ w.append(")")
+ w.removeIndent()
+}
+
+func formatLiteral(c ref.Val) string {
+ switch v := c.(type) {
+ case types.Bool:
+ return fmt.Sprintf("%t", v)
+ case types.Bytes:
+ return fmt.Sprintf("b\"%s\"", string(v))
+ case types.Double:
+ return fmt.Sprintf("%v", float64(v))
+ case types.Int:
+ return fmt.Sprintf("%d", int64(v))
+ case types.String:
+ return strconv.Quote(string(v))
+ case types.Uint:
+ return fmt.Sprintf("%du", uint64(v))
+ case types.Null:
+ return "null"
+ default:
+ panic("Unknown constant type")
+ }
+}
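+
+// For example (a sketch): formatLiteral(types.Uint(42)) yields "42u", and
+// formatLiteral(types.String("hi")) yields the quoted form "\"hi\"".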
+
+func (w *debugWriter) append(s string) {
+ w.doIndent()
+ w.buffer.WriteString(s)
+}
+
+func (w *debugWriter) appendFormat(f string, args ...any) {
+ w.append(fmt.Sprintf(f, args...))
+}
+
+func (w *debugWriter) doIndent() {
+ if w.lineStart {
+ w.lineStart = false
+ w.buffer.WriteString(strings.Repeat(" ", w.indent))
+ }
+}
+
+func (w *debugWriter) adorn(e any) {
+ w.append(w.adorner.GetMetadata(e))
+}
+
+func (w *debugWriter) appendLine() {
+ w.buffer.WriteString("\n")
+ w.lineStart = true
+}
+
+func (w *debugWriter) addIndent() {
+ w.indent++
+}
+
+func (w *debugWriter) removeIndent() {
+ w.indent--
+ if w.indent < 0 {
+ panic("negative indent")
+ }
+}
+
+func (w *debugWriter) String() string {
+ return w.buffer.String()
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
new file mode 100644
index 0000000000..17791dce6a
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "decls.go",
+ ],
+ importpath = "github.com/google/cel-go/common/decls",
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "decls_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/decls/decls.go b/hack/tools/vendor/github.com/google/cel-go/common/decls/decls.go
new file mode 100644
index 0000000000..f67808febe
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/decls/decls.go
@@ -0,0 +1,846 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package decls contains function and variable declaration structs and helper methods.
+package decls
+
+import (
+ "fmt"
+ "strings"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// NewFunction creates a new function declaration with a set of function options to configure overloads
+// and function definitions (implementations).
+//
+// Functions are checked for name collisions and singleton redefinition.
+func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) {
+ fn := &FunctionDecl{
+ name: name,
+ overloads: map[string]*OverloadDecl{},
+ overloadOrdinals: []string{},
+ }
+ var err error
+ for _, opt := range opts {
+ fn, err = opt(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(fn.overloads) == 0 {
+ return nil, fmt.Errorf("function %s must have at least one overload", name)
+ }
+ return fn, nil
+}
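+
+// Illustrative usage (a minimal sketch; the "double" function name and
+// "double_int" overload id are assumptions for the example):
+//
+//	fn, err := NewFunction("double",
+//	    Overload("double_int", []*types.Type{types.IntType}, types.IntType,
+//	        UnaryBinding(func(arg ref.Val) ref.Val {
+//	            return arg.(types.Int) * types.Int(2)
+//	        })))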
+
+// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all
+// overload instances.
+type FunctionDecl struct {
+ name string
+
+ // overloads associated with the function name.
+ overloads map[string]*OverloadDecl
+
+ // singleton implementation of the function for all overloads.
+ //
+ // If this option is set, an error will occur if any overloads specify a per-overload implementation
+ // or if another function with the same name attempts to redefine the singleton.
+ singleton *functions.Overload
+
+ // disableTypeGuards is a performance optimization to disable detailed runtime type checks which could
+ // add overhead on common operations. Setting this option true leaves error checks and argument checks
+ // intact.
+ disableTypeGuards bool
+
+ // state indicates that the binding should be provided as a declaration, as a runtime binding, or both.
+ state declarationState
+
+ // overloadOrdinals indicates the order in which the overloads were declared.
+ overloadOrdinals []string
+}
+
+type declarationState int
+
+const (
+ declarationStateUnset declarationState = iota
+ declarationDisabled
+ declarationEnabled
+)
+
+// Name returns the function name in human-readable terms, e.g. 'contains' or 'math.least'
+func (f *FunctionDecl) Name() string {
+ if f == nil {
+ return ""
+ }
+ return f.name
+}
+
+// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the
+// declaration should not be exposed for use in expressions.
+func (f *FunctionDecl) IsDeclarationDisabled() bool {
+ return f.state == declarationDisabled
+}
+
+// Merge combines an existing function declaration with another.
+//
+// If a function is extended, say by adding new overloads to an existing function, then it is merged with
+// the prior definition of the function, at which point its overloads must not collide with pre-existing
+// overloads and its bindings (singleton or per-overload) must not conflict with previous definitions either.
+func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
+ if f == other {
+ return f, nil
+ }
+ if f.Name() != other.Name() {
+ return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name())
+ }
+ merged := &FunctionDecl{
+ name: f.Name(),
+ overloads: make(map[string]*OverloadDecl, len(f.overloads)),
+ singleton: f.singleton,
+ overloadOrdinals: make([]string, len(f.overloads)),
+ // type guards are disabled only when both functions disable them; if one
+ // function expects type guards and the other does not, guards stay enabled.
+ disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards,
+ // default to the current function's declaration state.
+ state: f.state,
+ }
+ // If the other state indicates that the declaration should be explicitly enabled or
+ // disabled, then update the merged state with the most recent value.
+ if other.state != declarationStateUnset {
+ merged.state = other.state
+ }
+ // baseline copy of the overloads and their ordinals
+ copy(merged.overloadOrdinals, f.overloadOrdinals)
+ for oID, o := range f.overloads {
+ merged.overloads[oID] = o
+ }
+ // the other function's overloads and their ordinals are appended in declaration order
+ for _, oID := range other.overloadOrdinals {
+ o := other.overloads[oID]
+ err := merged.AddOverload(o)
+ if err != nil {
+ return nil, fmt.Errorf("function declaration merge failed: %v", err)
+ }
+ }
+ if other.singleton != nil {
+ if merged.singleton != nil && merged.singleton != other.singleton {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ merged.singleton = other.singleton
+ }
+ return merged, nil
+}
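+
+// A minimal sketch of extending a function via Merge, assuming base and ext
+// are *FunctionDecl values declared with the same function name:
+//
+//	merged, err := base.Merge(ext)
+//	// err is non-nil on overload signature collisions or on conflicting
+//	// singleton bindings.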
+
+// AddOverload ensures that the new overload does not collide with an existing overload signature;
+// however, if the function signatures are identical, the implementation may be rewritten as it is
+// difficult to compare functions by object identity.
+func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
+ if f == nil {
+ return fmt.Errorf("nil function cannot add overload: %s", overload.ID())
+ }
+ for oID, o := range f.overloads {
+ if oID != overload.ID() && o.SignatureOverlaps(overload) {
+ return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID())
+ }
+ if oID == overload.ID() {
+ if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() {
+ // Allow redefinition of an overload implementation so long as the signatures match.
+ if overload.hasBinding() {
+ f.overloads[oID] = overload
+ }
+ return nil
+ }
+ return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID)
+ }
+ }
+ f.overloadOrdinals = append(f.overloadOrdinals, overload.ID())
+ f.overloads[overload.ID()] = overload
+ return nil
+}
+
+// OverloadDecls returns the overload declarations in the order in which they were declared.
+func (f *FunctionDecl) OverloadDecls() []*OverloadDecl {
+ if f == nil {
+ return []*OverloadDecl{}
+ }
+ overloads := make([]*OverloadDecl, 0, len(f.overloads))
+ for _, oID := range f.overloadOrdinals {
+ overloads = append(overloads, f.overloads[oID])
+ }
+ return overloads
+}
+
+// Bindings produces a set of function bindings, if any are defined.
+func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) {
+ if f == nil {
+ return []*functions.Overload{}, nil
+ }
+ overloads := []*functions.Overload{}
+ nonStrict := false
+ for _, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ if o.hasBinding() {
+ overload := &functions.Overload{
+ Operator: o.ID(),
+ Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards),
+ Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards),
+ Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards),
+ OperandTrait: o.OperandTrait(),
+ NonStrict: o.IsNonStrict(),
+ }
+ overloads = append(overloads, overload)
+ nonStrict = nonStrict || o.IsNonStrict()
+ }
+ }
+ if f.singleton != nil {
+ if len(overloads) != 0 {
+ return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name())
+ }
+ overloads = []*functions.Overload{
+ {
+ Operator: f.Name(),
+ Unary: f.singleton.Unary,
+ Binary: f.singleton.Binary,
+ Function: f.singleton.Function,
+ OperandTrait: f.singleton.OperandTrait,
+ },
+ }
+ // fall-through to return single overload case.
+ }
+ if len(overloads) == 0 {
+ return overloads, nil
+ }
+ // Single overload. Replicate an entry for it using the function name as well.
+ if len(overloads) == 1 {
+ if overloads[0].Operator == f.Name() {
+ return overloads, nil
+ }
+ return append(overloads, &functions.Overload{
+ Operator: f.Name(),
+ Unary: overloads[0].Unary,
+ Binary: overloads[0].Binary,
+ Function: overloads[0].Function,
+ NonStrict: overloads[0].NonStrict,
+ OperandTrait: overloads[0].OperandTrait,
+ }), nil
+ }
+ // All of the defined overloads are wrapped into a top-level function which
+ // performs dynamic dispatch to the proper overload based on the argument types.
+ bindings := append([]*functions.Overload{}, overloads...)
+ funcDispatch := func(args ...ref.Val) ref.Val {
+ for _, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ // During dynamic dispatch over multiple functions, signature agreement checks
+ // are preserved in order to assist with the function resolution step.
+ switch len(args) {
+ case 1:
+ if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
+ return o.unaryOp(args[0])
+ }
+ case 2:
+ if o.binaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
+ return o.binaryOp(args[0], args[1])
+ }
+ }
+ if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
+ return o.functionOp(args...)
+ }
+ // eventually this will fall through to the MaybeNoSuchOverload below.
+ }
+ return MaybeNoSuchOverload(f.Name(), args...)
+ }
+ function := &functions.Overload{
+ Operator: f.Name(),
+ Function: funcDispatch,
+ NonStrict: nonStrict,
+ }
+ return append(bindings, function), nil
+}
+
+// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or
+// to return an unknown set, or to produce a new error for a missing function signature.
+func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val {
+ argTypes := make([]string, len(args))
+ var unk *types.Unknown
+ for i, arg := range args {
+ if types.IsError(arg) {
+ return arg
+ }
+ if types.IsUnknown(arg) {
+ unk = types.MergeUnknowns(arg.(*types.Unknown), unk)
+ }
+ argTypes[i] = arg.Type().TypeName()
+ }
+ if unk != nil {
+ return unk
+ }
+ signature := strings.Join(argTypes, ", ")
+ return types.NewErr("no such overload: %s(%s)", funcName, signature)
+}
+
+// FunctionOpt defines a functional option for mutating a function declaration.
+type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error)
+
+// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls.
+// Type guards remain on during dynamic dispatch for parsed-only expressions.
+func DisableTypeGuards(value bool) FunctionOpt {
+ return func(fn *FunctionDecl) (*FunctionDecl, error) {
+ fn.disableTypeGuards = value
+ return fn, nil
+ }
+}
+
+// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function
+// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations
+// of function declarations while still preserving the runtime behavior for previously compiled expressions.
+func DisableDeclaration(value bool) FunctionOpt {
+ return func(fn *FunctionDecl) (*FunctionDecl, error) {
+ if value {
+ fn.state = declarationDisabled
+ } else {
+ fn.state = declarationEnabled
+ }
+ return fn, nil
+ }
+}
+
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Unary: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Binary: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Function: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// Overload defines a new global overload with an overload id, argument types, and result type. Through the
+// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
+// be non-strict.
+//
+// Note: function bindings are commonly configured with Overload instances, whereas operand traits and
+// non-strictness should be rare occurrences.
+func Overload(overloadID string,
+ args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return newOverload(overloadID, false, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings are commonly configured with Overload instances, whereas operand traits and
+// non-strictness should be rare occurrences.
+func MemberOverload(overloadID string,
+ args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return newOverload(overloadID, true, args, resultType, opts...)
+}
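+
+// A minimal sketch of a receiver-style declaration (the overload id and the
+// argument types here are assumptions for the example):
+//
+//	fn, err := NewFunction("contains",
+//	    MemberOverload("string_contains_string",
+//	        []*types.Type{types.StringType, types.StringType}, types.BoolType))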
+
+func newOverload(overloadID string,
+ memberFunction bool, args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...)
+ if err != nil {
+ return nil, err
+ }
+ err = f.AddOverload(overload)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+ }
+}
+
+func newOverloadInternal(overloadID string,
+ memberFunction bool, args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) (*OverloadDecl, error) {
+ overload := &OverloadDecl{
+ id: overloadID,
+ argTypes: args,
+ resultType: resultType,
+ isMemberFunction: memberFunction,
+ }
+ var err error
+ for _, opt := range opts {
+ overload, err = opt(overload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return overload, nil
+}
+
+// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional
+// implementation.
+type OverloadDecl struct {
+ id string
+ argTypes []*types.Type
+ resultType *types.Type
+ isMemberFunction bool
+ // nonStrict indicates that the function will accept error and unknown arguments as inputs.
+ nonStrict bool
+ // operandTrait indicates whether the member argument should have a specific type-trait.
+ //
+ // This is useful for creating overloads which operate on a type-interface rather than a concrete type.
+ operandTrait int
+
+ // Function implementation options. Optional, but encouraged.
+ // unaryOp is a function binding that takes a single argument.
+ unaryOp functions.UnaryOp
+ // binaryOp is a function binding that takes two arguments.
+ binaryOp functions.BinaryOp
+ // functionOp is a catch-all for zero-arity and three-plus arity functions.
+ functionOp functions.FunctionOp
+}
+
+// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker
+// and interpreter to optimize performance.
+//
+// The ID format is usually one of two styles:
+// global: <functionName>_<argType>_<argTypeN>
+// member: <memberType>_<functionName>_<argType>_<argTypeN>
+func (o *OverloadDecl) ID() string {
+ if o == nil {
+ return ""
+ }
+ return o.id
+}
+
+// ArgTypes contains the set of argument types expected by the overload.
+//
+// For member functions ArgTypes[0] represents the member operand type.
+func (o *OverloadDecl) ArgTypes() []*types.Type {
+ if o == nil {
+ return emptyArgs
+ }
+ return o.argTypes
+}
+
+// IsMemberFunction indicates whether the overload is a member function.
+func (o *OverloadDecl) IsMemberFunction() bool {
+ if o == nil {
+ return false
+ }
+ return o.isMemberFunction
+}
+
+// IsNonStrict returns whether the overload accepts errors and unknown values as arguments.
+func (o *OverloadDecl) IsNonStrict() bool {
+ if o == nil {
+ return false
+ }
+ return o.nonStrict
+}
+
+// OperandTrait returns the trait mask of the first operand to the overload call, e.g.
+// `traits.Indexer`
+func (o *OverloadDecl) OperandTrait() int {
+ if o == nil {
+ return 0
+ }
+ return o.operandTrait
+}
+
+// ResultType indicates the output type from calling the function.
+func (o *OverloadDecl) ResultType() *types.Type {
+ if o == nil {
+ // *types.Type is nil-safe
+ return nil
+ }
+ return o.resultType
+}
+
+// TypeParams returns the type parameter names associated with the overload.
+func (o *OverloadDecl) TypeParams() []string {
+ typeParams := map[string]struct{}{}
+ collectParamNames(typeParams, o.ResultType())
+ for _, arg := range o.ArgTypes() {
+ collectParamNames(typeParams, arg)
+ }
+ params := make([]string, 0, len(typeParams))
+ for param := range typeParams {
+ params = append(params, param)
+ }
+ return params
+}
+
+// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature.
+//
+// Operand trait and strict-ness are not considered as part of signature equality, but the result type is.
+func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool {
+ if o == other {
+ return true
+ }
+ if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+ return false
+ }
+ for i, at := range o.ArgTypes() {
+ oat := other.ArgTypes()[i]
+ if !at.IsEquivalentType(oat) {
+ return false
+ }
+ }
+ return o.ResultType().IsEquivalentType(other.ResultType())
+}
+
+// SignatureOverlaps indicates whether two functions have non-equal, but overlapping function signatures.
+//
+// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type.
+func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool {
+ if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+ return false
+ }
+ argsOverlap := true
+ for i, argType := range o.ArgTypes() {
+ otherArgType := other.ArgTypes()[i]
+ argsOverlap = argsOverlap &&
+ (argType.IsAssignableType(otherArgType) ||
+ otherArgType.IsAssignableType(argType))
+ }
+ return argsOverlap
+}
+
+// hasBinding indicates whether the overload already has a definition.
+func (o *OverloadDecl) hasBinding() bool {
+ return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil)
+}
+
+// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined.
+func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp {
+ if o.unaryOp == nil {
+ return nil
+ }
+ return func(arg ref.Val) ref.Val {
+ if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) {
+ return MaybeNoSuchOverload(funcName, arg)
+ }
+ return o.unaryOp(arg)
+ }
+}
+
+// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined.
+func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp {
+ if o.binaryOp == nil {
+ return nil
+ }
+ return func(arg1, arg2 ref.Val) ref.Val {
+ if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) {
+ return MaybeNoSuchOverload(funcName, arg1, arg2)
+ }
+ return o.binaryOp(arg1, arg2)
+ }
+}
+
+// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided.
+func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp {
+ if o.functionOp == nil {
+ return nil
+ }
+ return func(args ...ref.Val) ref.Val {
+ if !o.matchesRuntimeSignature(disableTypeGuards, args...) {
+ return MaybeNoSuchOverload(funcName, args...)
+ }
+ return o.functionOp(args...)
+ }
+}
+
+// matchesRuntimeUnarySignature indicates whether the argument type is runtime assignable to the overload's expected argument.
+func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool {
+ return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) &&
+ matchOperandTrait(o.OperandTrait(), arg)
+}
+
+// matchesRuntimeBinarySignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool {
+ return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) &&
+ matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) &&
+ matchOperandTrait(o.OperandTrait(), arg1)
+}
+
+// matchesRuntimeSignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool {
+ if len(args) != len(o.ArgTypes()) {
+ return false
+ }
+ if len(args) == 0 {
+ return true
+ }
+ for i, arg := range args {
+ if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) {
+ return false
+ }
+ }
+ return matchOperandTrait(o.OperandTrait(), args[0])
+}
+
+func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool {
+ if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) {
+ return true
+ }
+ if types.IsUnknownOrError(arg) {
+ return false
+ }
+ return disableTypeGuards || argType.IsAssignableRuntimeType(arg)
+}
+
+func matchOperandTrait(trait int, arg ref.Val) bool {
+ return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error)
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ if len(o.ArgTypes()) != 1 {
+ return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID())
+ }
+ o.unaryOp = binding
+ return o, nil
+ }
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ if len(o.ArgTypes()) != 2 {
+ return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID())
+ }
+ o.binaryOp = binding
+ return o, nil
+ }
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ o.functionOp = binding
+ return o, nil
+ }
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ o.nonStrict = true
+ return o, nil
+ }
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement
+// for the overload to be invoked successfully.
+func OverloadOperandTrait(trait int) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ o.operandTrait = trait
+ return o, nil
+ }
+}
+
+// NewConstant creates a new constant declaration.
+func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl {
+ return &VariableDecl{name: name, varType: t, value: v}
+}
+
+// NewVariable creates a new variable declaration.
+func NewVariable(name string, t *types.Type) *VariableDecl {
+ return &VariableDecl{name: name, varType: t}
+}
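+
+// For example (a minimal sketch; the names are illustrative only):
+//
+//	v := NewVariable("request.size", types.IntType)
+//	c := NewConstant("pi", types.DoubleType, types.Double(3.14159))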
+
+// VariableDecl defines a variable declaration which may optionally have a constant value.
+type VariableDecl struct {
+ name string
+ varType *types.Type
+ value ref.Val
+}
+
+// Name returns the fully-qualified variable name.
+func (v *VariableDecl) Name() string {
+ if v == nil {
+ return ""
+ }
+ return v.name
+}
+
+// Type returns the types.Type value associated with the variable.
+func (v *VariableDecl) Type() *types.Type {
+ if v == nil {
+ // types.Type is nil-safe
+ return nil
+ }
+ return v.varType
+}
+
+// Value returns the constant value associated with the declaration.
+func (v *VariableDecl) Value() ref.Val {
+ if v == nil {
+ return nil
+ }
+ return v.value
+}
+
+// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input.
+func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool {
+ if v == other {
+ return true
+ }
+ return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type())
+}
+
+// TypeVariable creates a new type identifier for use within a types.Provider
+func TypeVariable(t *types.Type) *VariableDecl {
+ return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t))
+}
+
+// variableDeclToExprDecl converts a go-native variable declaration into a protobuf-typed variable declaration.
+func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) {
+ varType, err := types.TypeToExprType(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewVar(v.Name(), varType), nil
+}
+
+// functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration.
+func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) {
+ overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads))
+ for i, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ paramNames := map[string]struct{}{}
+ argTypes := make([]*exprpb.Type, len(o.ArgTypes()))
+ for j, a := range o.ArgTypes() {
+ collectParamNames(paramNames, a)
+ at, err := types.TypeToExprType(a)
+ if err != nil {
+ return nil, err
+ }
+ argTypes[j] = at
+ }
+ collectParamNames(paramNames, o.ResultType())
+ resultType, err := types.TypeToExprType(o.ResultType())
+ if err != nil {
+ return nil, err
+ }
+ if len(paramNames) == 0 {
+ if o.IsMemberFunction() {
+ overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType)
+ } else {
+ overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType)
+ }
+ } else {
+ params := []string{}
+ for pn := range paramNames {
+ params = append(params, pn)
+ }
+ if o.IsMemberFunction() {
+ overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params)
+ } else {
+ overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params)
+ }
+ }
+ }
+ return chkdecls.NewFunction(f.Name(), overloads...), nil
+}
+
+func collectParamNames(paramNames map[string]struct{}, arg *types.Type) {
+ if arg.Kind() == types.TypeParamKind {
+ paramNames[arg.TypeName()] = struct{}{}
+ }
+ for _, param := range arg.Parameters() {
+ collectParamNames(paramNames, param)
+ }
+}
+
+var (
+ emptyArgs = []*types.Type{}
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/doc.go b/hack/tools/vendor/github.com/google/cel-go/common/doc.go
new file mode 100644
index 0000000000..5362fdfe4b
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package common defines types and utilities common to expression parsing,
+// checking, and interpretation.
+package common
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/error.go b/hack/tools/vendor/github.com/google/cel-go/common/error.go
new file mode 100644
index 0000000000..0cf21345e6
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/error.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// NewError creates an error associated with an expression id with the given message at the given location.
+func NewError(id int64, message string, location Location) *Error {
+ return &Error{Message: message, Location: location, ExprID: id}
+}
+
+// Error type which references an expression id, a location within source, and a message.
+type Error struct {
+ Location Location
+ Message string
+ ExprID int64
+}
+
+const (
+ dot = "."
+ ind = "^"
+ wideDot = "\uff0e"
+ wideInd = "\uff3e"
+
+ // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet.
+ maxSnippetLength = 16384
+)
+
+// ToDisplayString decorates the error message with the source location.
+func (e *Error) ToDisplayString(source Source) string {
+ var result = fmt.Sprintf("ERROR: %s:%d:%d: %s",
+ source.Description(),
+ e.Location.Line(),
+ e.Location.Column()+1, // add one to the 0-based column for display
+ e.Message)
+ if snippet, found := source.Snippet(e.Location.Line()); found && len(snippet) <= maxSnippetLength {
+ snippet := strings.Replace(snippet, "\t", " ", -1)
+ srcLine := "\n | " + snippet
+ var bytes = []byte(snippet)
+ var indLine = "\n | "
+ for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ {
+ _, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ if sz > 1 {
+ indLine += wideDot
+ } else {
+ indLine += dot
+ }
+ }
+ if _, sz := utf8.DecodeRune(bytes); sz > 1 {
+ indLine += wideInd
+ } else {
+ indLine += ind
+ }
+ result += srcLine + indLine
+ }
+ return result
+}
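+
+// For example (a sketch; the "<input>" description and the message text are
+// assumed), an error at line 1, column 7 of the expression `a.b && cc`
+// renders as:
+//
+//	ERROR: <input>:1:8: undeclared reference to 'cc'
+//	 | a.b && cc
+//	 | .......^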
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/errors.go b/hack/tools/vendor/github.com/google/cel-go/common/errors.go
new file mode 100644
index 0000000000..25adc73d8e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/errors.go
@@ -0,0 +1,103 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Errors type which contains a list of errors observed during parsing.
+type Errors struct {
+ errors []*Error
+ source Source
+ numErrors int
+ maxErrorsToReport int
+}
+
+// NewErrors creates a new instance of the Errors type.
+func NewErrors(source Source) *Errors {
+ return &Errors{
+ errors: []*Error{},
+ source: source,
+ maxErrorsToReport: 100,
+ }
+}
+
+// ReportError records an error at a source location.
+func (e *Errors) ReportError(l Location, format string, args ...any) {
+ e.ReportErrorAtID(0, l, format, args...)
+}
+
+// ReportErrorAtID records an error at a source location and expression id.
+func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) {
+ e.numErrors++
+ if e.numErrors > e.maxErrorsToReport {
+ return
+ }
+ err := &Error{
+ ExprID: id,
+ Location: l,
+ Message: fmt.Sprintf(format, args...),
+ }
+ e.errors = append(e.errors, err)
+}
+
+// GetErrors returns the list of observed errors.
+func (e *Errors) GetErrors() []*Error {
+ return e.errors[:]
+}
+
+// Append creates a new Errors object with the current and input errors.
+func (e *Errors) Append(errs []*Error) *Errors {
+ return &Errors{
+ errors: append(e.errors[:], errs...),
+ source: e.source,
+ numErrors: e.numErrors + len(errs),
+ maxErrorsToReport: e.maxErrorsToReport,
+ }
+}
+
+// ToDisplayString renders the error set as a newline-delimited string.
+func (e *Errors) ToDisplayString() string {
+ errorsInString := e.maxErrorsToReport
+ if e.numErrors > e.maxErrorsToReport {
+ // add one more error to indicate the number of errors truncated.
+ errorsInString++
+ } else {
+ // otherwise render exactly as many entries as errors were observed.
+ errorsInString = e.numErrors
+ }
+
+ result := make([]string, errorsInString)
+ sort.SliceStable(e.errors, func(i, j int) bool {
+ ei := e.errors[i].Location
+ ej := e.errors[j].Location
+ return ei.Line() < ej.Line() ||
+ (ei.Line() == ej.Line() && ei.Column() < ej.Column())
+ })
+ for i, err := range e.errors {
+ // More errors than maxErrorsToReport may be present after appending two Errors objects.
+ if i >= e.maxErrorsToReport {
+ break
+ }
+ result[i] = err.ToDisplayString(e.source)
+ }
+ if e.numErrors > e.maxErrorsToReport {
+ result[e.maxErrorsToReport] = fmt.Sprintf("%d more errors were truncated", e.numErrors-e.maxErrorsToReport)
+ }
+ return strings.Join(result, "\n")
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/functions/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/functions/BUILD.bazel
new file mode 100644
index 0000000000..3cc27d60ce
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/functions/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "functions.go",
+ ],
+ importpath = "github.com/google/cel-go/common/functions",
+ deps = [
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/functions/functions.go b/hack/tools/vendor/github.com/google/cel-go/common/functions/functions.go
new file mode 100644
index 0000000000..67f4a5944e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/functions/functions.go
@@ -0,0 +1,61 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package functions defines the standard builtin functions supported by the interpreter.
+package functions
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Overload defines a named overload of a function, indicating an operand trait
+// which must be present on the first argument to the overload as well as one
+// of either a unary, binary, or function implementation.
+//
+// The majority of operators within the expression language are unary or binary
+// and the specializations simplify the call contract for implementers of
+// types with operator overloads. Any added complexity is assumed to be handled
+// by the generic FunctionOp.
+type Overload struct {
+ // Operator name as written in an expression or defined within
+ // operators.go.
+ Operator string
+
+ // Operand trait used to dispatch the call. The zero-value indicates a
+ // global function overload or that one of the Unary / Binary / Function
+ // definitions should be used to execute the call.
+ OperandTrait int
+
+ // Unary defines the overload with a UnaryOp implementation. May be nil.
+ Unary UnaryOp
+
+ // Binary defines the overload with a BinaryOp implementation. May be nil.
+ Binary BinaryOp
+
+ // Function defines the overload with a FunctionOp implementation. May be
+ // nil.
+ Function FunctionOp
+
+ // NonStrict specifies whether the Overload will tolerate arguments that
+ // are types.Err or types.Unknown.
+ NonStrict bool
+}
+
+// UnaryOp is a function that takes a single value and produces an output.
+type UnaryOp func(value ref.Val) ref.Val
+
+// BinaryOp is a function that takes two values and produces an output.
+type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
+
+// FunctionOp is a function which accepts zero or more arguments and produces
+// a value or error as a result.
+type FunctionOp func(values ...ref.Val) ref.Val
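+
+// A minimal sketch of a unary overload value (assuming the common/types
+// package for the Bool value type; the operator name matches operators.go):
+//
+//	var notOp = &Overload{
+//	    Operator: "!_",
+//	    Unary: func(value ref.Val) ref.Val {
+//	        return !value.(types.Bool)
+//	    },
+//	}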
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/location.go b/hack/tools/vendor/github.com/google/cel-go/common/location.go
new file mode 100644
index 0000000000..ec3fa7cb50
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/location.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+// Location interface to represent a location within Source.
+type Location interface {
+ Line() int // 1-based line number within source.
+ Column() int // 0-based column number within source.
+}
+
+// SourceLocation helper type to manually construct a location.
+type SourceLocation struct {
+ line int
+ column int
+}
+
+var (
+ // SourceLocation implements the Location interface.
+ _ Location = &SourceLocation{}
+ // NoLocation is a particular illegal location.
+ NoLocation = &SourceLocation{-1, -1}
+)
+
+// NewLocation creates a new location.
+func NewLocation(line, column int) Location {
+ return &SourceLocation{
+ line: line,
+ column: column}
+}
+
+// Line returns the 1-based line of the location.
+func (l *SourceLocation) Line() int {
+ return l.line
+}
+
+// Column returns the 0-based column number of the location.
+func (l *SourceLocation) Column() int {
+ return l.column
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/operators/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
new file mode 100644
index 0000000000..b5b67f0623
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "operators.go",
+ ],
+ importpath = "github.com/google/cel-go/common/operators",
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/operators/operators.go b/hack/tools/vendor/github.com/google/cel-go/common/operators/operators.go
new file mode 100644
index 0000000000..f9b39bda3f
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -0,0 +1,157 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package operators defines the internal function names of operators.
+//
+// All operators in the expression language are modelled as function calls.
+package operators
+
+// String "names" for CEL operators.
+const (
+ // Symbolic operators.
+ Conditional = "_?_:_"
+ LogicalAnd = "_&&_"
+ LogicalOr = "_||_"
+ LogicalNot = "!_"
+ Equals = "_==_"
+ NotEquals = "_!=_"
+ Less = "_<_"
+ LessEquals = "_<=_"
+ Greater = "_>_"
+ GreaterEquals = "_>=_"
+ Add = "_+_"
+ Subtract = "_-_"
+ Multiply = "_*_"
+ Divide = "_/_"
+ Modulo = "_%_"
+ Negate = "-_"
+ Index = "_[_]"
+ OptIndex = "_[?_]"
+ OptSelect = "_?._"
+
+ // Macros, must have a valid identifier.
+ Has = "has"
+ All = "all"
+ Exists = "exists"
+ ExistsOne = "exists_one"
+ Map = "map"
+ Filter = "filter"
+
+ // Named operators, must not be valid identifiers.
+ NotStrictlyFalse = "@not_strictly_false"
+ In = "@in"
+
+ // Deprecated: named operators with valid identifiers.
+ OldNotStrictlyFalse = "__not_strictly_false__"
+ OldIn = "_in_"
+)
+
+var (
+ operators = map[string]string{
+ "+": Add,
+ "/": Divide,
+ "==": Equals,
+ ">": Greater,
+ ">=": GreaterEquals,
+ "in": In,
+ "<": Less,
+ "<=": LessEquals,
+ "%": Modulo,
+ "*": Multiply,
+ "!=": NotEquals,
+ "-": Subtract,
+ }
+ // operatorMap maps an operator symbol to a struct containing its display name
+ // (if applicable), its precedence, and its arity.
+ //
+ // If the symbol does not have a display name listed in the map, it is only because it requires
+ // special casing to render properly as text.
+ operatorMap = map[string]struct {
+ displayName string
+ precedence int
+ arity int
+ }{
+ Conditional: {displayName: "", precedence: 8, arity: 3},
+ LogicalOr: {displayName: "||", precedence: 7, arity: 2},
+ LogicalAnd: {displayName: "&&", precedence: 6, arity: 2},
+ Equals: {displayName: "==", precedence: 5, arity: 2},
+ Greater: {displayName: ">", precedence: 5, arity: 2},
+ GreaterEquals: {displayName: ">=", precedence: 5, arity: 2},
+ In: {displayName: "in", precedence: 5, arity: 2},
+ Less: {displayName: "<", precedence: 5, arity: 2},
+ LessEquals: {displayName: "<=", precedence: 5, arity: 2},
+ NotEquals: {displayName: "!=", precedence: 5, arity: 2},
+ OldIn: {displayName: "in", precedence: 5, arity: 2},
+ Add: {displayName: "+", precedence: 4, arity: 2},
+ Subtract: {displayName: "-", precedence: 4, arity: 2},
+ Divide: {displayName: "/", precedence: 3, arity: 2},
+ Modulo: {displayName: "%", precedence: 3, arity: 2},
+ Multiply: {displayName: "*", precedence: 3, arity: 2},
+ LogicalNot: {displayName: "!", precedence: 2, arity: 1},
+ Negate: {displayName: "-", precedence: 2, arity: 1},
+ Index: {displayName: "", precedence: 1, arity: 2},
+ OptIndex: {displayName: "", precedence: 1, arity: 2},
+ OptSelect: {displayName: "", precedence: 1, arity: 2},
+ }
+)
+
+// Find returns the internal function name for an operator, if the input text is one.
+func Find(text string) (string, bool) {
+ op, found := operators[text]
+ return op, found
+}
+
+// FindReverse returns the unmangled, text representation of the operator.
+func FindReverse(symbol string) (string, bool) {
+ op, found := operatorMap[symbol]
+ if !found {
+ return "", false
+ }
+ return op.displayName, true
+}
+
+// FindReverseBinaryOperator returns the unmangled, text representation of a binary operator.
+//
+// If the symbol does refer to an operator, but the operator does not have a display name, the
+// result is false.
+func FindReverseBinaryOperator(symbol string) (string, bool) {
+ op, found := operatorMap[symbol]
+ if !found || op.arity != 2 {
+ return "", false
+ }
+ if op.displayName == "" {
+ return "", false
+ }
+ return op.displayName, true
+}
+
+// Precedence returns the operator precedence, where a higher number indicates a
+// higher-precedence operation.
+func Precedence(symbol string) int {
+ op, found := operatorMap[symbol]
+ if !found {
+ return 0
+ }
+ return op.precedence
+}
+
+// Arity returns the number of arguments the operator takes;
+// -1 is returned if an undefined symbol is provided.
+func Arity(symbol string) int {
+ op, found := operatorMap[symbol]
+ if !found {
+ return -1
+ }
+ return op.arity
+}
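+
+// For example (a sketch):
+//
+//	op, ok := Find("<=")         // LessEquals ("_<=_"), true
+//	name, ok2 := FindReverse(op) // "<=", true
+//	Precedence(LessEquals)       // 5
+//	Arity(LogicalNot)            // 1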
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
new file mode 100644
index 0000000000..e46e2f4830
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "overloads.go",
+ ],
+ importpath = "github.com/google/cel-go/common/overloads",
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/overloads/overloads.go b/hack/tools/vendor/github.com/google/cel-go/common/overloads/overloads.go
new file mode 100644
index 0000000000..9d50f4367b
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -0,0 +1,327 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package overloads defines the internal overload identifiers for function and
+// operator overloads.
+package overloads
+
+// Boolean logic overloads
+const (
+ Conditional = "conditional"
+ LogicalAnd = "logical_and"
+ LogicalOr = "logical_or"
+ LogicalNot = "logical_not"
+ NotStrictlyFalse = "not_strictly_false"
+ Equals = "equals"
+ NotEquals = "not_equals"
+ LessBool = "less_bool"
+ LessInt64 = "less_int64"
+ LessInt64Double = "less_int64_double"
+ LessInt64Uint64 = "less_int64_uint64"
+ LessUint64 = "less_uint64"
+ LessUint64Double = "less_uint64_double"
+ LessUint64Int64 = "less_uint64_int64"
+ LessDouble = "less_double"
+ LessDoubleInt64 = "less_double_int64"
+ LessDoubleUint64 = "less_double_uint64"
+ LessString = "less_string"
+ LessBytes = "less_bytes"
+ LessTimestamp = "less_timestamp"
+ LessDuration = "less_duration"
+ LessEqualsBool = "less_equals_bool"
+ LessEqualsInt64 = "less_equals_int64"
+ LessEqualsInt64Double = "less_equals_int64_double"
+ LessEqualsInt64Uint64 = "less_equals_int64_uint64"
+ LessEqualsUint64 = "less_equals_uint64"
+ LessEqualsUint64Double = "less_equals_uint64_double"
+ LessEqualsUint64Int64 = "less_equals_uint64_int64"
+ LessEqualsDouble = "less_equals_double"
+ LessEqualsDoubleInt64 = "less_equals_double_int64"
+ LessEqualsDoubleUint64 = "less_equals_double_uint64"
+ LessEqualsString = "less_equals_string"
+ LessEqualsBytes = "less_equals_bytes"
+ LessEqualsTimestamp = "less_equals_timestamp"
+ LessEqualsDuration = "less_equals_duration"
+ GreaterBool = "greater_bool"
+ GreaterInt64 = "greater_int64"
+ GreaterInt64Double = "greater_int64_double"
+ GreaterInt64Uint64 = "greater_int64_uint64"
+ GreaterUint64 = "greater_uint64"
+ GreaterUint64Double = "greater_uint64_double"
+ GreaterUint64Int64 = "greater_uint64_int64"
+ GreaterDouble = "greater_double"
+ GreaterDoubleInt64 = "greater_double_int64"
+ GreaterDoubleUint64 = "greater_double_uint64"
+ GreaterString = "greater_string"
+ GreaterBytes = "greater_bytes"
+ GreaterTimestamp = "greater_timestamp"
+ GreaterDuration = "greater_duration"
+ GreaterEqualsBool = "greater_equals_bool"
+ GreaterEqualsInt64 = "greater_equals_int64"
+ GreaterEqualsInt64Double = "greater_equals_int64_double"
+ GreaterEqualsInt64Uint64 = "greater_equals_int64_uint64"
+ GreaterEqualsUint64 = "greater_equals_uint64"
+ GreaterEqualsUint64Double = "greater_equals_uint64_double"
+ GreaterEqualsUint64Int64 = "greater_equals_uint64_int64"
+ GreaterEqualsDouble = "greater_equals_double"
+ GreaterEqualsDoubleInt64 = "greater_equals_double_int64"
+ GreaterEqualsDoubleUint64 = "greater_equals_double_uint64"
+ GreaterEqualsString = "greater_equals_string"
+ GreaterEqualsBytes = "greater_equals_bytes"
+ GreaterEqualsTimestamp = "greater_equals_timestamp"
+ GreaterEqualsDuration = "greater_equals_duration"
+)
+
+// Math overloads
+const (
+ AddInt64 = "add_int64"
+ AddUint64 = "add_uint64"
+ AddDouble = "add_double"
+ AddString = "add_string"
+ AddBytes = "add_bytes"
+ AddList = "add_list"
+ AddTimestampDuration = "add_timestamp_duration"
+ AddDurationTimestamp = "add_duration_timestamp"
+ AddDurationDuration = "add_duration_duration"
+ SubtractInt64 = "subtract_int64"
+ SubtractUint64 = "subtract_uint64"
+ SubtractDouble = "subtract_double"
+ SubtractTimestampTimestamp = "subtract_timestamp_timestamp"
+ SubtractTimestampDuration = "subtract_timestamp_duration"
+ SubtractDurationDuration = "subtract_duration_duration"
+ MultiplyInt64 = "multiply_int64"
+ MultiplyUint64 = "multiply_uint64"
+ MultiplyDouble = "multiply_double"
+ DivideInt64 = "divide_int64"
+ DivideUint64 = "divide_uint64"
+ DivideDouble = "divide_double"
+ ModuloInt64 = "modulo_int64"
+ ModuloUint64 = "modulo_uint64"
+ NegateInt64 = "negate_int64"
+ NegateDouble = "negate_double"
+)
+
+// Index overloads
+const (
+ IndexList = "index_list"
+ IndexMap = "index_map"
+ IndexMessage = "index_message" // TODO: introduce concept of types.Message
+)
+
+// In operators
+const (
+ DeprecatedIn = "in"
+ InList = "in_list"
+ InMap = "in_map"
+ InMessage = "in_message" // TODO: introduce concept of types.Message
+)
+
+// Size overloads
+const (
+ Size = "size"
+ SizeString = "size_string"
+ SizeBytes = "size_bytes"
+ SizeList = "size_list"
+ SizeMap = "size_map"
+ SizeStringInst = "string_size"
+ SizeBytesInst = "bytes_size"
+ SizeListInst = "list_size"
+ SizeMapInst = "map_size"
+)
+
+// String function names.
+const (
+ Contains = "contains"
+ EndsWith = "endsWith"
+ Matches = "matches"
+ StartsWith = "startsWith"
+)
+
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtQuoteString = "strings_quote"
+)
+
+// String function overload names.
+const (
+ ContainsString = "contains_string"
+ EndsWithString = "ends_with_string"
+ MatchesString = "matches_string"
+ StartsWithString = "starts_with_string"
+)
+
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtFormatString = "string_format"
+)
+
+// Time-based functions.
+const (
+ TimeGetFullYear = "getFullYear"
+ TimeGetMonth = "getMonth"
+ TimeGetDayOfYear = "getDayOfYear"
+ TimeGetDate = "getDate"
+ TimeGetDayOfMonth = "getDayOfMonth"
+ TimeGetDayOfWeek = "getDayOfWeek"
+ TimeGetHours = "getHours"
+ TimeGetMinutes = "getMinutes"
+ TimeGetSeconds = "getSeconds"
+ TimeGetMilliseconds = "getMilliseconds"
+)
+
+// Timestamp overloads for time functions without timezones.
+const (
+ TimestampToYear = "timestamp_to_year"
+ TimestampToMonth = "timestamp_to_month"
+ TimestampToDayOfYear = "timestamp_to_day_of_year"
+ TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month"
+ TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based"
+ TimestampToDayOfWeek = "timestamp_to_day_of_week"
+ TimestampToHours = "timestamp_to_hours"
+ TimestampToMinutes = "timestamp_to_minutes"
+ TimestampToSeconds = "timestamp_to_seconds"
+ TimestampToMilliseconds = "timestamp_to_milliseconds"
+)
+
+// Timestamp overloads for time functions with timezones.
+const (
+ TimestampToYearWithTz = "timestamp_to_year_with_tz"
+ TimestampToMonthWithTz = "timestamp_to_month_with_tz"
+ TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz"
+ TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz"
+ TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz"
+ TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz"
+ TimestampToHoursWithTz = "timestamp_to_hours_with_tz"
+ TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz"
+ TimestampToSecondsWithTz = "timestamp_to_seconds_tz"
+ TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz"
+)
+
+// Duration overloads for time functions.
+const (
+ DurationToHours = "duration_to_hours"
+ DurationToMinutes = "duration_to_minutes"
+ DurationToSeconds = "duration_to_seconds"
+ DurationToMilliseconds = "duration_to_milliseconds"
+)
+
+// Type conversion methods and overloads
+const (
+ TypeConvertInt = "int"
+ TypeConvertUint = "uint"
+ TypeConvertDouble = "double"
+ TypeConvertBool = "bool"
+ TypeConvertString = "string"
+ TypeConvertBytes = "bytes"
+ TypeConvertTimestamp = "timestamp"
+ TypeConvertDuration = "duration"
+ TypeConvertType = "type"
+ TypeConvertDyn = "dyn"
+)
+
+// Int conversion functions.
+const (
+ IntToInt = "int64_to_int64"
+ UintToInt = "uint64_to_int64"
+ DoubleToInt = "double_to_int64"
+ StringToInt = "string_to_int64"
+ TimestampToInt = "timestamp_to_int64"
+ DurationToInt = "duration_to_int64"
+)
+
+// Uint conversion functions.
+const (
+ UintToUint = "uint64_to_uint64"
+ IntToUint = "int64_to_uint64"
+ DoubleToUint = "double_to_uint64"
+ StringToUint = "string_to_uint64"
+)
+
+// Double conversion functions.
+const (
+ DoubleToDouble = "double_to_double"
+ IntToDouble = "int64_to_double"
+ UintToDouble = "uint64_to_double"
+ StringToDouble = "string_to_double"
+)
+
+// Bool conversion functions.
+const (
+ BoolToBool = "bool_to_bool"
+ StringToBool = "string_to_bool"
+)
+
+// Bytes conversion functions.
+const (
+ BytesToBytes = "bytes_to_bytes"
+ StringToBytes = "string_to_bytes"
+)
+
+// String conversion functions.
+const (
+ StringToString = "string_to_string"
+ BoolToString = "bool_to_string"
+ IntToString = "int64_to_string"
+ UintToString = "uint64_to_string"
+ DoubleToString = "double_to_string"
+ BytesToString = "bytes_to_string"
+ TimestampToString = "timestamp_to_string"
+ DurationToString = "duration_to_string"
+)
+
+// Timestamp conversion functions
+const (
+ TimestampToTimestamp = "timestamp_to_timestamp"
+ StringToTimestamp = "string_to_timestamp"
+ IntToTimestamp = "int64_to_timestamp"
+)
+
+// Convert duration from string
+const (
+ DurationToDuration = "duration_to_duration"
+ StringToDuration = "string_to_duration"
+ IntToDuration = "int64_to_duration"
+)
+
+// Convert to dyn
+const (
+ ToDyn = "to_dyn"
+)
+
+// Comprehension helper methods, not directly accessible by a developer.
+const (
+ Iterator = "@iterator"
+ HasNext = "@hasNext"
+ Next = "@next"
+)
+
+// IsTypeConversionFunction returns whether the input function is a standard library type
+// conversion function.
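+// For example (illustrative), IsTypeConversionFunction("int") reports true,
+// while IsTypeConversionFunction("matches") reports false.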
+func IsTypeConversionFunction(function string) bool {
+ switch function {
+ case TypeConvertBool,
+ TypeConvertBytes,
+ TypeConvertDouble,
+ TypeConvertDuration,
+ TypeConvertDyn,
+ TypeConvertInt,
+ TypeConvertString,
+ TypeConvertTimestamp,
+ TypeConvertType,
+ TypeConvertUint:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/runes/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
new file mode 100644
index 0000000000..bb30242cfa
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
@@ -0,0 +1,25 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "buffer.go",
+ ],
+ importpath = "github.com/google/cel-go/common/runes",
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "buffer_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/runes/buffer.go b/hack/tools/vendor/github.com/google/cel-go/common/runes/buffer.go
new file mode 100644
index 0000000000..021198224d
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/runes/buffer.go
@@ -0,0 +1,242 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package runes provides interfaces and utilities for working with runes.
+package runes
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// Buffer is an interface for accessing a contiguous array of code points.
+type Buffer interface {
+	// Get returns the code point at index i.
+	Get(i int) rune
+	// Slice returns the code points in the half-open range [i, j) as a string.
+	Slice(i, j int) string
+	// Len returns the number of code points in the buffer.
+	Len() int
+}
+
+type emptyBuffer struct{}
+
+func (e *emptyBuffer) Get(i int) rune {
+ panic("slice index out of bounds")
+}
+
+func (e *emptyBuffer) Slice(i, j int) string {
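+	// Only the empty slice [0:0] is valid for an empty buffer.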
+ if i != 0 || i != j {
+ panic("slice index out of bounds")
+ }
+ return ""
+}
+
+func (e *emptyBuffer) Len() int {
+ return 0
+}
+
+var _ Buffer = &emptyBuffer{}
+
+// asciiBuffer is an implementation for an array of code points that contains code points only from
+// the ASCII character set.
+type asciiBuffer struct {
+ arr []byte
+}
+
+func (a *asciiBuffer) Get(i int) rune {
+ return rune(uint32(a.arr[i]))
+}
+
+func (a *asciiBuffer) Slice(i, j int) string {
+ return string(a.arr[i:j])
+}
+
+func (a *asciiBuffer) Len() int {
+ return len(a.arr)
+}
+
+var _ Buffer = &asciiBuffer{}
+
+// basicBuffer is an implementation for an array of code points that contains code points from both
+// the Latin-1 character set and the Basic Multilingual Plane.
+type basicBuffer struct {
+ arr []uint16
+}
+
+func (b *basicBuffer) Get(i int) rune {
+ return rune(uint32(b.arr[i]))
+}
+
+func (b *basicBuffer) Slice(i, j int) string {
+ var str strings.Builder
+ str.Grow((j - i) * 3) // Worst case encoding size for 0xffff is 3.
+ for ; i < j; i++ {
+ str.WriteRune(rune(uint32(b.arr[i])))
+ }
+ return str.String()
+}
+
+func (b *basicBuffer) Len() int {
+ return len(b.arr)
+}
+
+var _ Buffer = &basicBuffer{}
+
+// supplementalBuffer is an implementation for an array of code points that contains code points from
+// the Latin-1 character set, the Basic Multilingual Plane, or the Supplemental Multilingual Plane.
+type supplementalBuffer struct {
+ arr []rune
+}
+
+func (s *supplementalBuffer) Get(i int) rune {
+ return rune(uint32(s.arr[i]))
+}
+
+func (s *supplementalBuffer) Slice(i, j int) string {
+ return string(s.arr[i:j])
+}
+
+func (s *supplementalBuffer) Len() int {
+ return len(s.arr)
+}
+
+var _ Buffer = &supplementalBuffer{}
+
+var nilBuffer = &emptyBuffer{}
+
+// NewBuffer returns an efficient implementation of Buffer for the given text based on the ranges of
+// the encoded code points contained within.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff', we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If no higher code point
+// is found, the underlying storage is a uint16 array containing only Unicode characters in the
+// Basic Multilingual Plane. If we encounter a code point above '\uffff', we allocate a rune array,
+// copy the previous elements of the byte or uint16 array, and continue. The resulting storage is
+// a rune array that can contain any Unicode character.
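+//
+// For example (illustrative), NewBuffer("abc") is backed by a byte array,
+// NewBuffer("héllo") is promoted to uint16 storage, and any input containing
+// a code point above '\uffff' is promoted again to rune storage.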
+func NewBuffer(data string) Buffer {
+ buf, _ := newBuffer(data, false)
+ return buf
+}
+
+// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on
+// the ranges of the encoded code points contained within, as well as the line offsets.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff', we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If no higher code point
+// is found, the underlying storage is a uint16 array containing only Unicode characters in the
+// Basic Multilingual Plane. If we encounter a code point above '\uffff', we allocate a rune array,
+// copy the previous elements of the byte or uint16 array, and continue. The resulting storage is
+// a rune array that can contain any Unicode character.
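+//
+// For example (illustrative), NewBufferAndLineOffsets("a\nb") returns the
+// offsets [2, 4]: an entry marking where the second line begins and a final
+// entry just past the end of the text.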
+func NewBufferAndLineOffsets(data string) (Buffer, []int32) {
+ return newBuffer(data, true)
+}
+
+func newBuffer(data string, lines bool) (Buffer, []int32) {
+ if len(data) == 0 {
+ return nilBuffer, []int32{0}
+ }
+ var (
+ idx = 0
+ off int32 = 0
+ buf8 = make([]byte, 0, len(data))
+ buf16 []uint16
+ buf32 []rune
+ offs []int32
+ )
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
+ if r < utf8.RuneSelf {
+ buf8 = append(buf8, byte(r))
+ off++
+ continue
+ }
+ if r <= 0xffff {
+ buf16 = make([]uint16, len(buf8), len(data))
+ for i, v := range buf8 {
+ buf16[i] = uint16(v)
+ }
+ buf8 = nil
+ buf16 = append(buf16, uint16(r))
+ off++
+ goto copy16
+ }
+ buf32 = make([]rune, len(buf8), len(data))
+ for i, v := range buf8 {
+ buf32[i] = rune(uint32(v))
+ }
+ buf8 = nil
+ buf32 = append(buf32, r)
+ off++
+ goto copy32
+ }
+ if lines {
+ offs = append(offs, off+1)
+ }
+ return &asciiBuffer{
+ arr: buf8,
+ }, offs
+copy16:
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
+ if r <= 0xffff {
+ buf16 = append(buf16, uint16(r))
+ off++
+ continue
+ }
+ buf32 = make([]rune, len(buf16), len(data))
+ for i, v := range buf16 {
+ buf32[i] = rune(uint32(v))
+ }
+ buf16 = nil
+ buf32 = append(buf32, r)
+ off++
+ goto copy32
+ }
+ if lines {
+ offs = append(offs, off+1)
+ }
+ return &basicBuffer{
+ arr: buf16,
+ }, offs
+copy32:
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
+ buf32 = append(buf32, r)
+ off++
+ }
+ if lines {
+ offs = append(offs, off+1)
+ }
+ return &supplementalBuffer{
+ arr: buf32,
+ }, offs
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/source.go b/hack/tools/vendor/github.com/google/cel-go/common/source.go
new file mode 100644
index 0000000000..ec79cb5454
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/source.go
@@ -0,0 +1,173 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "github.com/google/cel-go/common/runes"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source is an interface for accessing expression source contents.
+type Source interface {
+ // Content returns the source content represented as a string.
+ // Examples contents are the single file contents, textbox field,
+ // or url parameter.
+ Content() string
+
+ // Description gives a brief description of the source.
+ // Example descriptions are a file name or ui element.
+ Description() string
+
+ // LineOffsets gives the character offsets at which lines occur.
+ // The zero-th entry should refer to the break between the first
+ // and second line, or EOF if there is only one line of source.
+ LineOffsets() []int32
+
+ // LocationOffset translates a Location to an offset.
+ // Given the line and column of the Location returns the
+ // Location's character offset in the Source, and a bool
+ // indicating whether the Location was found.
+ LocationOffset(location Location) (int32, bool)
+
+	// OffsetLocation translates a character offset to a Location,
+	// returning false if the conversion was not feasible.
+ OffsetLocation(offset int32) (Location, bool)
+
+ // NewLocation takes an input line and column and produces a Location.
+ // The default behavior is to treat the line and column as absolute,
+ // but concrete derivations may use this method to convert a relative
+ // line and column position into an absolute location.
+ NewLocation(line, col int) Location
+
+ // Snippet returns a line of content and whether the line was found.
+ Snippet(line int) (string, bool)
+}
+
+// The sourceImpl type implements the Source interface.
+type sourceImpl struct {
+ runes.Buffer
+ description string
+ lineOffsets []int32
+}
+
+var _ runes.Buffer = &sourceImpl{}
+
+// TODO(jimlarson) "Character offsets" should index the code points
+// within the UTF-8 encoded string. It currently indexes bytes.
+// Can be accomplished by using []rune instead of string for contents.
+
+// NewTextSource creates a new Source from the input text string.
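+// For example (illustrative), NewTextSource("a\nb").OffsetLocation(2) reports
+// line 2, column 0.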
+func NewTextSource(text string) Source {
+ return NewStringSource(text, "")
+}
+
+// NewStringSource creates a new Source from the given contents and description.
+func NewStringSource(contents string, description string) Source {
+ // Compute line offsets up front as they are referred to frequently.
+ buf, offs := runes.NewBufferAndLineOffsets(contents)
+ return &sourceImpl{
+ Buffer: buf,
+ description: description,
+ lineOffsets: offs,
+ }
+}
+
+// NewInfoSource creates a new Source from a SourceInfo.
+func NewInfoSource(info *exprpb.SourceInfo) Source {
+ return &sourceImpl{
+ Buffer: runes.NewBuffer(""),
+ description: info.GetLocation(),
+ lineOffsets: info.GetLineOffsets(),
+ }
+}
+
+// Content implements the Source interface method.
+func (s *sourceImpl) Content() string {
+ return s.Slice(0, s.Len())
+}
+
+// Description implements the Source interface method.
+func (s *sourceImpl) Description() string {
+ return s.description
+}
+
+// LineOffsets implements the Source interface method.
+func (s *sourceImpl) LineOffsets() []int32 {
+ return s.lineOffsets
+}
+
+// LocationOffset implements the Source interface method.
+func (s *sourceImpl) LocationOffset(location Location) (int32, bool) {
+ if lineOffset, found := s.findLineOffset(location.Line()); found {
+ return lineOffset + int32(location.Column()), true
+ }
+ return -1, false
+}
+
+// NewLocation implements the Source interface method.
+func (s *sourceImpl) NewLocation(line, col int) Location {
+ return NewLocation(line, col)
+}
+
+// OffsetLocation implements the Source interface method.
+func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) {
+ line, lineOffset := s.findLine(offset)
+ return NewLocation(int(line), int(offset-lineOffset)), true
+}
+
+// Snippet implements the Source interface method.
+func (s *sourceImpl) Snippet(line int) (string, bool) {
+ charStart, found := s.findLineOffset(line)
+ if !found || s.Len() == 0 {
+ return "", false
+ }
+ charEnd, found := s.findLineOffset(line + 1)
+ if found {
+ return s.Slice(int(charStart), int(charEnd-1)), true
+ }
+ return s.Slice(int(charStart), s.Len()), true
+}
+
+// findLineOffset returns the offset where the (1-indexed) line begins,
+// or false if the line doesn't exist.
+func (s *sourceImpl) findLineOffset(line int) (int32, bool) {
+ if line == 1 {
+ return 0, true
+ }
+	if line > 1 && line <= len(s.lineOffsets) {
+ offset := s.lineOffsets[line-2]
+ return offset, true
+ }
+ return -1, false
+}
+
+// findLine finds the line that contains the given character offset and
+// returns the line number and offset of the beginning of that line.
+// Note that the last line is treated as if it contains all offsets
+// beyond the end of the actual source.
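+// For example (illustrative), with lineOffsets [2, 4], findLine(2) reports
+// line 2 beginning at offset 2.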
+func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) {
+ var line int32 = 1
+ for _, lineOffset := range s.lineOffsets {
+ if lineOffset > characterOffset {
+ break
+ }
+ line++
+ }
+ if line == 1 {
+ return line, 0
+ }
+ return line, s.lineOffsets[line-2]
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
new file mode 100644
index 0000000000..b55f452156
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "standard.go",
+ ],
+ importpath = "github.com/google/cel-go/common/stdlib",
+ deps = [
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ ],
+)
\ No newline at end of file
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/stdlib/standard.go b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/standard.go
new file mode 100644
index 0000000000..1550c17863
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/standard.go
@@ -0,0 +1,620 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package stdlib contains all of the standard library function declarations and definitions for CEL.
+package stdlib
+
+import (
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ stdFunctions []*decls.FunctionDecl
+ stdTypes []*decls.VariableDecl
+)
+
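+// init assembles the standard library type and function declarations once at
+// package initialization.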
+func init() {
+ paramA := types.NewTypeParamType("A")
+ paramB := types.NewTypeParamType("B")
+ listOfA := types.NewListType(paramA)
+ mapOfAB := types.NewMapType(paramA, paramB)
+
+ stdTypes = []*decls.VariableDecl{
+ decls.TypeVariable(types.BoolType),
+ decls.TypeVariable(types.BytesType),
+ decls.TypeVariable(types.DoubleType),
+ decls.TypeVariable(types.DurationType),
+ decls.TypeVariable(types.IntType),
+ decls.TypeVariable(listOfA),
+ decls.TypeVariable(mapOfAB),
+ decls.TypeVariable(types.NullType),
+ decls.TypeVariable(types.StringType),
+ decls.TypeVariable(types.TimestampType),
+ decls.TypeVariable(types.TypeType),
+ decls.TypeVariable(types.UintType),
+ }
+
+ stdFunctions = []*decls.FunctionDecl{
+ // Logical operators. Special-cased within the interpreter.
+ // Note, the singleton binding prevents extensions from overriding the operator behavior.
+ function(operators.Conditional,
+ decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonFunctionBinding(noFunctionOverrides)),
+ function(operators.LogicalAnd,
+ decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.LogicalOr,
+ decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.LogicalNot,
+ decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ b, ok := val.(types.Bool)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(val)
+ }
+ return b.Negate()
+ })),
+
+ // Comprehension short-circuiting related function
+ function(operators.NotStrictlyFalse,
+ decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict(),
+ decls.UnaryBinding(notStrictlyFalse))),
+ // Deprecated: __not_strictly_false__
+ function(operators.OldNotStrictlyFalse,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict(),
+ decls.UnaryBinding(notStrictlyFalse))),
+
+ // Equality / inequality. Special-cased in the interpreter
+ function(operators.Equals,
+ decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.NotEquals,
+ decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+
+ // Mathematical operators
+ function(operators.Add,
+ decls.Overload(overloads.AddBytes,
+ argTypes(types.BytesType, types.BytesType), types.BytesType),
+ decls.Overload(overloads.AddDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.AddDurationDuration,
+ argTypes(types.DurationType, types.DurationType), types.DurationType),
+ decls.Overload(overloads.AddDurationTimestamp,
+ argTypes(types.DurationType, types.TimestampType), types.TimestampType),
+ decls.Overload(overloads.AddTimestampDuration,
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ decls.Overload(overloads.AddInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.AddList,
+ argTypes(listOfA, listOfA), listOfA),
+ decls.Overload(overloads.AddString,
+ argTypes(types.StringType, types.StringType), types.StringType),
+ decls.Overload(overloads.AddUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Adder).Add(rhs)
+ }, traits.AdderType)),
+ function(operators.Divide,
+ decls.Overload(overloads.DivideDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.DivideInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.DivideUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Divider).Divide(rhs)
+ }, traits.DividerType)),
+ function(operators.Modulo,
+ decls.Overload(overloads.ModuloInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.ModuloUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Modder).Modulo(rhs)
+ }, traits.ModderType)),
+ function(operators.Multiply,
+ decls.Overload(overloads.MultiplyDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.MultiplyInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.MultiplyUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Multiplier).Multiply(rhs)
+ }, traits.MultiplierType)),
+ function(operators.Negate,
+ decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ if types.IsBool(val) {
+ return types.MaybeNoSuchOverloadErr(val)
+ }
+ return val.(traits.Negater).Negate()
+ }, traits.NegatorType)),
+ function(operators.Subtract,
+ decls.Overload(overloads.SubtractDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.SubtractDurationDuration,
+ argTypes(types.DurationType, types.DurationType), types.DurationType),
+ decls.Overload(overloads.SubtractInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.SubtractTimestampDuration,
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ decls.Overload(overloads.SubtractTimestampTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.DurationType),
+ decls.Overload(overloads.SubtractUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Subtractor).Subtract(rhs)
+ }, traits.SubtractorType)),
+
+		// Relational operators
+
+ function(operators.Less,
+ decls.Overload(overloads.LessBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.LessInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.LessBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.LessTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.LessDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne {
+ return types.True
+ }
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.LessEquals,
+ decls.Overload(overloads.LessEqualsBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.LessEqualsBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.LessEqualsTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntOne {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.Greater,
+ decls.Overload(overloads.GreaterBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.GreaterBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.GreaterTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.GreaterDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne {
+ return types.True
+ }
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.GreaterEquals,
+ decls.Overload(overloads.GreaterEqualsBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntNegOne {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ // Indexing
+ function(operators.Index,
+ decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA),
+ decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Indexer).Get(rhs)
+ }, traits.IndexerType)),
+
+ // Collections operators
+ function(operators.In,
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(operators.OldIn,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(overloads.DeprecatedIn,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(overloads.Size,
+ decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType),
+ decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType),
+ decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType),
+ decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType),
+ decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType),
+ decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType),
+ decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType),
+ decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ return val.(traits.Sizer).Size()
+ }, traits.SizerType)),
+
+ // Type conversions
+ function(overloads.TypeConvertType,
+ decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)),
+ decls.SingletonUnaryBinding(convertToType(types.TypeType))),
+
+ // Bool conversions
+ function(overloads.TypeConvertBool,
+ decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType,
+ decls.UnaryBinding(convertToType(types.BoolType)))),
+
+ // Bytes conversions
+ function(overloads.TypeConvertBytes,
+ decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.StringToBytes, argTypes(types.StringType), types.BytesType,
+ decls.UnaryBinding(convertToType(types.BytesType)))),
+
+ // Double conversions
+ function(overloads.TypeConvertDouble,
+ decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType))),
+ decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType))),
+ decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType)))),
+
+ // Duration conversions
+ function(overloads.TypeConvertDuration,
+ decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType,
+ decls.UnaryBinding(convertToType(types.DurationType))),
+ decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType,
+ decls.UnaryBinding(convertToType(types.DurationType)))),
+
+ // Dyn conversions
+ function(overloads.TypeConvertDyn,
+ decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType),
+ decls.SingletonUnaryBinding(identity)),
+
+ // Int conversions
+ function(overloads.TypeConvertInt,
+ decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ ),
+
+ // String conversions
+ function(overloads.TypeConvertString,
+ decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType)))),
+
+ // Timestamp conversions
+ function(overloads.TypeConvertTimestamp,
+ decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType,
+ decls.UnaryBinding(convertToType(types.TimestampType))),
+ decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType,
+ decls.UnaryBinding(convertToType(types.TimestampType)))),
+
+ // Uint conversions
+ function(overloads.TypeConvertUint,
+ decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType))),
+ decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType))),
+ decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType)))),
+
+ // String functions
+ function(overloads.Contains,
+ decls.MemberOverload(overloads.ContainsString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringContains)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.EndsWith,
+ decls.MemberOverload(overloads.EndsWithString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringEndsWith)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.StartsWith,
+ decls.MemberOverload(overloads.StartsWithString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringStartsWith)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.Matches,
+ decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.MemberOverload(overloads.MatchesString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val {
+ return str.(traits.Matcher).Match(pat)
+ }, traits.MatcherType)),
+
+ // Timestamp / duration functions
+ function(overloads.TimeGetFullYear,
+ decls.MemberOverload(overloads.TimestampToYear,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToYearWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetMonth,
+ decls.MemberOverload(overloads.TimestampToMonth,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMonthWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfYear,
+ decls.MemberOverload(overloads.TimestampToDayOfYear,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfYearWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfMonth,
+ decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDate,
+ decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfWeek,
+ decls.MemberOverload(overloads.TimestampToDayOfWeek,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetHours,
+ decls.MemberOverload(overloads.TimestampToHours,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToHoursWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToHours,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetMinutes,
+ decls.MemberOverload(overloads.TimestampToMinutes,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMinutesWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToMinutes,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetSeconds,
+ decls.MemberOverload(overloads.TimestampToSeconds,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToSecondsWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToSeconds,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetMilliseconds,
+ decls.MemberOverload(overloads.TimestampToMilliseconds,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMillisecondsWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToMilliseconds,
+ argTypes(types.DurationType), types.IntType)),
+ }
+}
+
+// Functions returns the set of standard library function declarations and definitions for CEL.
+func Functions() []*decls.FunctionDecl {
+ return stdFunctions
+}
+
+// Types returns the set of standard library types for CEL.
+func Types() []*decls.VariableDecl {
+ return stdTypes
+}
+
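+// notStrictlyFalse returns its input when the value is a boolean, and true for
+// any other value (such as errors or unknowns), so comprehensions only
+// short-circuit on a definite false.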
+func notStrictlyFalse(value ref.Val) ref.Val {
+ if types.IsBool(value) {
+ return value
+ }
+ return types.True
+}
+
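+// inAggregate implements membership tests by delegating to the container's
+// Contains method when the right-hand side supports the container trait.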
+func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val {
+ if rhs.Type().HasTrait(traits.ContainerType) {
+ return rhs.(traits.Container).Contains(lhs)
+ }
+ return types.ValOrErr(rhs, "no such overload")
+}
+
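+// function declares a standard library function, panicking on error since the
+// inputs are static and a failure indicates a programming mistake.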
+func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl {
+ fn, err := decls.NewFunction(name, opts...)
+ if err != nil {
+ panic(err)
+ }
+ return fn
+}
+
+func argTypes(args ...*types.Type) []*types.Type {
+ return args
+}
+
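+// noBinaryOverrides and noFunctionOverrides always report a missing overload,
+// reserving the bound operators for special-case handling in the interpreter.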
+func noBinaryOverrides(rhs, lhs ref.Val) ref.Val {
+ return types.NoSuchOverloadErr()
+}
+
+func noFunctionOverrides(args ...ref.Val) ref.Val {
+ return types.NoSuchOverloadErr()
+}
+
+func identity(val ref.Val) ref.Val {
+ return val
+}
+
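+// convertToType returns a unary operation that converts its argument to the
+// given CEL type via the value's own ConvertToType method.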
+func convertToType(t ref.Type) functions.UnaryOp {
+ return func(val ref.Val) ref.Val {
+ return val.ConvertToType(t)
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/BUILD.bazel
new file mode 100644
index 0000000000..8f010fae44
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/BUILD.bazel
@@ -0,0 +1,92 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "any_value.go",
+ "bool.go",
+ "bytes.go",
+ "compare.go",
+ "double.go",
+ "duration.go",
+ "err.go",
+ "int.go",
+ "iterator.go",
+ "json_value.go",
+ "list.go",
+ "map.go",
+ "null.go",
+ "object.go",
+ "optional.go",
+ "overflow.go",
+ "provider.go",
+ "string.go",
+ "timestamp.go",
+ "types.go",
+ "uint.go",
+ "unknown.go",
+ "util.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types",
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@com_github_stoewer_go_strcase//:go_default_library",
+ "@dev_cel_expr//:expr",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "bool_test.go",
+ "bytes_test.go",
+ "double_test.go",
+ "duration_test.go",
+ "int_test.go",
+ "json_list_test.go",
+ "json_struct_test.go",
+ "list_test.go",
+ "map_test.go",
+ "null_test.go",
+ "object_test.go",
+ "optional_test.go",
+ "provider_test.go",
+ "string_test.go",
+ "timestamp_test.go",
+ "types_test.go",
+ "uint_test.go",
+ "unknown_test.go",
+ "util_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//common/types/ref:go_default_library",
+ "//test:go_default_library",
+ "//test/proto3pb:test_all_types_go_proto",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/any_value.go b/hack/tools/vendor/github.com/google/cel-go/common/types/any_value.go
new file mode 100644
index 0000000000..cda0f13acf
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/any_value.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// anyValueType is the reflected type of google.protobuf.Any.
+var anyValueType = reflect.TypeOf(&anypb.Any{})
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/bool.go b/hack/tools/vendor/github.com/google/cel-go/common/types/bool.go
new file mode 100644
index 0000000000..565734f3ff
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/bool.go
@@ -0,0 +1,141 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Bool type that implements ref.Val and supports comparison and negation.
+type Bool bool
+
+var (
+	// boolWrapperType is the golang reflected type for the protobuf bool wrapper type.
+ boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{})
+)
+
+// Boolean constants
+const (
+ False = Bool(false)
+ True = Bool(true)
+)
+
+// Compare implements the traits.Comparer interface method.
+func (b Bool) Compare(other ref.Val) ref.Val {
+ otherBool, ok := other.(Bool)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ if b == otherBool {
+ return IntZero
+ }
+ if !b && otherBool {
+ return IntNegOne
+ }
+ return IntOne
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Bool:
+ return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped to a wrapperspb.BoolValue before being packed into an Any.
+ return anypb.New(wrapperspb.Bool(bool(b)))
+ case boolWrapperType:
+ // Convert the bool to a wrapperspb.BoolValue.
+ return wrapperspb.Bool(bool(b)), nil
+ case jsonValueType:
+ // Return the bool as a new structpb.Value.
+ return structpb.NewBoolValue(bool(b)), nil
+ default:
+ if typeDesc.Elem().Kind() == reflect.Bool {
+ p := bool(b)
+ return &p, nil
+ }
+ }
+ case reflect.Interface:
+ bv := b.Value()
+ if reflect.TypeOf(bv).Implements(typeDesc) {
+ return bv, nil
+ }
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (b Bool) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(strconv.FormatBool(bool(b)))
+ case BoolType:
+ return b
+ case TypeType:
+ return BoolType
+ }
+ return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (b Bool) Equal(other ref.Val) ref.Val {
+ otherBool, ok := other.(Bool)
+ return Bool(ok && b == otherBool)
+}
+
+// IsZeroValue returns true if the boolean value is false.
+func (b Bool) IsZeroValue() bool {
+ return b == False
+}
+
+// Negate implements the traits.Negater interface method.
+func (b Bool) Negate() ref.Val {
+ return !b
+}
+
+// Type implements the ref.Val interface method.
+func (b Bool) Type() ref.Type {
+ return BoolType
+}
+
+// Value implements the ref.Val interface method.
+func (b Bool) Value() any {
+ return bool(b)
+}
+
+// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType.
+func IsBool(elem ref.Val) bool {
+ switch v := elem.(type) {
+ case Bool:
+ return true
+ case ref.Val:
+ return v.Type() == BoolType
+ default:
+ return false
+ }
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/bytes.go b/hack/tools/vendor/github.com/google/cel-go/common/types/bytes.go
new file mode 100644
index 0000000000..7e813e291b
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/bytes.go
@@ -0,0 +1,140 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "unicode/utf8"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Bytes type that implements ref.Val and supports add, compare, and size
+// operations.
+type Bytes []byte
+
+var (
+	// byteWrapperType is the golang reflected type for the protobuf bytes wrapper type.
+ byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{})
+)
+
+// Add implements traits.Adder interface method by concatenating byte sequences.
+func (b Bytes) Add(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ return append(b, otherBytes...)
+}
+
+// Compare implements traits.Comparer interface method by lexicographic ordering.
+func (b Bytes) Compare(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ return Int(bytes.Compare(b, otherBytes))
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Array:
+ if len(b) != typeDesc.Len() {
+ return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len())
+ }
+ refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem()))
+ refArr := refArrPtr.Elem()
+ for i, byt := range b {
+ refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem()))
+ }
+ return refArr.Interface(), nil
+ case reflect.Slice:
+ return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Bytes([]byte(b)))
+ case byteWrapperType:
+ // Convert the bytes to a wrapperspb.BytesValue.
+ return wrapperspb.Bytes([]byte(b)), nil
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64.
+ // The encoding below matches the golang 'encoding/json' behavior during marshaling,
+ // which uses base64.StdEncoding.
+ str := base64.StdEncoding.EncodeToString([]byte(b))
+ return structpb.NewStringValue(str), nil
+ }
+ case reflect.Interface:
+ bv := b.Value()
+ if reflect.TypeOf(bv).Implements(typeDesc) {
+ return bv, nil
+ }
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ if !utf8.Valid(b) {
+ return NewErr("invalid UTF-8 in bytes, cannot convert to string")
+ }
+ return String(b)
+ case BytesType:
+ return b
+ case TypeType:
+ return BytesType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (b Bytes) Equal(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ return Bool(ok && bytes.Equal(b, otherBytes))
+}
+
+// IsZeroValue returns true if the byte array is empty.
+func (b Bytes) IsZeroValue() bool {
+ return len(b) == 0
+}
+
+// Size implements the traits.Sizer interface method.
+func (b Bytes) Size() ref.Val {
+ return Int(len(b))
+}
+
+// Type implements the ref.Val interface method.
+func (b Bytes) Type() ref.Type {
+ return BytesType
+}
+
+// Value implements the ref.Val interface method.
+func (b Bytes) Value() any {
+ return []byte(b)
+}
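+
+// Usage sketch (illustrative comment only): Bytes concatenates, orders
+// lexicographically, and reports size; JSON conversion base64-encodes the
+// value as noted in ConvertToNative above.
+//
+//	b := types.Bytes("abc")
+//	b.Add(types.Bytes("def"))          // types.Bytes("abcdef")
+//	b.Compare(types.Bytes("abd"))      // types.IntNegOne
+//	b.Size()                           // types.Int(3)
+//	b.ConvertToType(types.StringType)  // types.String("abc"); errors on invalid UTF-8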
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/compare.go b/hack/tools/vendor/github.com/google/cel-go/common/types/compare.go
new file mode 100644
index 0000000000..e196826180
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/compare.go
@@ -0,0 +1,97 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+func compareDoubleInt(d Double, i Int) Int {
+ if d < math.MinInt64 {
+ return IntNegOne
+ }
+ if d > math.MaxInt64 {
+ return IntOne
+ }
+ return compareDouble(d, Double(i))
+}
+
+func compareIntDouble(i Int, d Double) Int {
+ return -compareDoubleInt(d, i)
+}
+
+func compareDoubleUint(d Double, u Uint) Int {
+ if d < 0 {
+ return IntNegOne
+ }
+ if d > math.MaxUint64 {
+ return IntOne
+ }
+ return compareDouble(d, Double(u))
+}
+
+func compareUintDouble(u Uint, d Double) Int {
+ return -compareDoubleUint(d, u)
+}
+
+func compareIntUint(i Int, u Uint) Int {
+ if i < 0 || u > math.MaxInt64 {
+ return IntNegOne
+ }
+ cmp := i - Int(u)
+ if cmp < 0 {
+ return IntNegOne
+ }
+ if cmp > 0 {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareUintInt(u Uint, i Int) Int {
+ return -compareIntUint(i, u)
+}
+
+func compareDouble(a, b Double) Int {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareInt(a, b Int) ref.Val {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareUint(a, b Uint) ref.Val {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
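+
+// Behavior sketch (illustrative comment only): operands outside the other
+// type's representable range order strictly rather than overflowing during
+// conversion.
+//
+//	compareDoubleInt(Double(1e100), Int(42)) // IntOne: above math.MaxInt64
+//	compareIntUint(Int(-1), Uint(0))         // IntNegOne: negative < any uint
+//	compareDouble(Double(2.0), Double(2.0))  // IntZero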
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/doc.go b/hack/tools/vendor/github.com/google/cel-go/common/types/doc.go
new file mode 100644
index 0000000000..5f641d7043
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types contains the types, traits, and utilities common to all
+// components of expression handling.
+package types
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/double.go b/hack/tools/vendor/github.com/google/cel-go/common/types/double.go
new file mode 100644
index 0000000000..027e789786
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/double.go
@@ -0,0 +1,211 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Double type that implements ref.Val, comparison, and mathematical
+// operations.
+type Double float64
+
+var (
+ // doubleWrapperType reflected type for protobuf double wrapper type.
+ doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{})
+
+ // floatWrapperType reflected type for protobuf float wrapper type.
+ floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{})
+)
+
+// Add implements traits.Adder.Add.
+func (d Double) Add(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d + otherDouble
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Double) Compare(other ref.Val) ref.Val {
+ if math.IsNaN(float64(d)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareDouble(d, ov)
+ case Int:
+ return compareDoubleInt(d, ov)
+ case Uint:
+ return compareDoubleUint(d, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Float32:
+ v := float32(d)
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Float64:
+ v := float64(d)
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Double(float64(d)))
+ case doubleWrapperType:
+ // Convert to a wrapperspb.DoubleValue
+ return wrapperspb.Double(float64(d)), nil
+ case floatWrapperType:
+ // Convert to a wrapperspb.FloatValue (with truncation).
+ return wrapperspb.Float(float32(d)), nil
+ case jsonValueType:
+ // Note, there are special cases for proto3 to json conversion that
+ // expect the floating point value to be converted to a NaN,
+ // Infinity, or -Infinity string values, but the jsonpb string
+ // marshaling of the protobuf.Value will handle this conversion.
+ return structpb.NewNumberValue(float64(d)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Float32:
+ v := float32(d)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Float64:
+ v := float64(d)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ dv := d.Value()
+ if reflect.TypeOf(dv).Implements(typeDesc) {
+ return dv, nil
+ }
+ if reflect.TypeOf(d).Implements(typeDesc) {
+ return d, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ i, err := doubleToInt64Checked(float64(d))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(i)
+ case UintType:
+ i, err := doubleToUint64Checked(float64(d))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(i)
+ case DoubleType:
+ return d
+ case StringType:
+ return String(fmt.Sprintf("%g", float64(d)))
+ case TypeType:
+ return DoubleType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (d Double) Divide(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d / otherDouble
+}
+
+// Equal implements ref.Val.Equal.
+func (d Double) Equal(other ref.Val) ref.Val {
+ if math.IsNaN(float64(d)) {
+ return False
+ }
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(d == ov)
+ case Int:
+ return Bool(compareDoubleInt(d, ov) == 0)
+ case Uint:
+ return Bool(compareDoubleUint(d, ov) == 0)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the double value is 0.0.
+func (d Double) IsZeroValue() bool {
+ return float64(d) == 0.0
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (d Double) Multiply(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d * otherDouble
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Double) Negate() ref.Val {
+ return -d
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Double) Subtract(subtrahend ref.Val) ref.Val {
+ subtraDouble, ok := subtrahend.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ return d - subtraDouble
+}
+
+// Type implements ref.Val.Type.
+func (d Double) Type() ref.Type {
+ return DoubleType
+}
+
+// Value implements ref.Val.Value.
+func (d Double) Value() any {
+ return float64(d)
+}
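+
+// Usage sketch (illustrative comment only): conversions are range-checked,
+// NaN is unordered, and cross-type numeric equality is supported.
+//
+//	types.Double(1e100).ConvertToType(types.IntType)   // error: exceeds int64 range
+//	types.Double(3.5).ConvertToType(types.StringType)  // types.String("3.5")
+//	types.Double(math.NaN()).Compare(types.Double(1))  // error: NaN cannot be ordered
+//	types.Double(1.0).Equal(types.Int(1))              // types.True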
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/duration.go b/hack/tools/vendor/github.com/google/cel-go/common/types/duration.go
new file mode 100644
index 0000000000..596e56d6b0
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/duration.go
@@ -0,0 +1,222 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// Duration type that implements ref.Val and supports add, compare, negate,
+// and subtract operators. This type is also a receiver which means it can
+// participate in dispatch to receiver functions.
+type Duration struct {
+ time.Duration
+}
+
+func durationOf(d time.Duration) Duration {
+ return Duration{Duration: d}
+}
+
+var (
+ durationValueType = reflect.TypeOf(&dpb.Duration{})
+
+ durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{
+ overloads.TimeGetHours: DurationGetHours,
+ overloads.TimeGetMinutes: DurationGetMinutes,
+ overloads.TimeGetSeconds: DurationGetSeconds,
+ overloads.TimeGetMilliseconds: DurationGetMilliseconds,
+ }
+)
+
+// Add implements traits.Adder.Add.
+func (d Duration) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ dur2 := other.(Duration)
+ val, err := addDurationChecked(d.Duration, dur2.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+ case TimestampType:
+ ts := other.(Timestamp).Time
+ val, err := addTimeDurationChecked(ts, d.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return timestampOf(val)
+ }
+ return MaybeNoSuchOverloadErr(other)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Duration) Compare(other ref.Val) ref.Val {
+ otherDur, ok := other.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ d1 := d.Duration
+ d2 := otherDur.Duration
+ switch {
+ case d1 < d2:
+ return IntNegOne
+ case d1 > d2:
+ return IntOne
+ default:
+ return IntZero
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the duration is already assignable to the desired type return it.
+ if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
+ return d.Duration, nil
+ }
+ if reflect.TypeOf(d).AssignableTo(typeDesc) {
+ return d, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ // Pack the duration as a dpb.Duration into an Any value.
+ return anypb.New(dpb.New(d.Duration))
+ case durationValueType:
+ // Unwrap the CEL value to its underlying proto value.
+ return dpb.New(d.Duration), nil
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion.
+ // Note, using jsonpb would wrap the result in extra double quotes.
+ v := d.ConvertToType(StringType)
+ if IsError(v) {
+ return nil, v.(*Err)
+ }
+ return structpb.NewStringValue(string(v.(String))), nil
+ }
+ return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Duration) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s")
+ case IntType:
+ return Int(d.Duration)
+ case DurationType:
+ return d
+ case TypeType:
+ return DurationType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (d Duration) Equal(other ref.Val) ref.Val {
+ otherDur, ok := other.(Duration)
+ return Bool(ok && d.Duration == otherDur.Duration)
+}
+
+// IsZeroValue returns true if the duration value is zero
+func (d Duration) IsZeroValue() bool {
+ return d.Duration == 0
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Duration) Negate() ref.Val {
+ val, err := negateDurationChecked(d.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+}
+
+// Receive implements traits.Receiver.Receive.
+func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
+ if len(args) == 0 {
+ if f, found := durationZeroArgOverloads[function]; found {
+ return f(d)
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
+ subtraDur, ok := subtrahend.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+}
+
+// Type implements ref.Val.Type.
+func (d Duration) Type() ref.Type {
+ return DurationType
+}
+
+// Value implements ref.Val.Value.
+func (d Duration) Value() any {
+ return d.Duration
+}
+
+// DurationGetHours returns the duration in hours.
+func DurationGetHours(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Hours())
+}
+
+// DurationGetMinutes returns the duration in minutes.
+func DurationGetMinutes(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Minutes())
+}
+
+// DurationGetSeconds returns the duration in seconds.
+func DurationGetSeconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Seconds())
+}
+
+// DurationGetMilliseconds returns the duration in milliseconds.
+func DurationGetMilliseconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Milliseconds())
+}
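+
+// Usage sketch (illustrative comment only): string conversion renders the
+// duration in seconds, and the getter overloads truncate toward zero via the
+// float-to-Int conversion.
+//
+//	d := types.Duration{Duration: 90 * time.Minute}
+//	d.ConvertToType(types.StringType)             // types.String("5400s")
+//	types.DurationGetHours(d)                     // types.Int(1), truncated from 1.5
+//	d.Receive(overloads.TimeGetMinutes, "", nil)  // types.Int(90)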
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/err.go b/hack/tools/vendor/github.com/google/cel-go/common/types/err.go
new file mode 100644
index 0000000000..9c9d9e21ef
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/err.go
@@ -0,0 +1,169 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Error interface which allows types.Err values to be treated as error values.
+type Error interface {
+ error
+ ref.Val
+}
+
+// Err type which extends the built-in go error and implements ref.Val.
+type Err struct {
+ error
+ id int64
+}
+
+var (
+ // ErrType singleton.
+ ErrType = NewOpaqueType("error")
+
+ // errDivideByZero is an error indicating a division by zero of an integer value.
+ errDivideByZero = errors.New("division by zero")
+ // errModulusByZero is an error indicating a modulus by zero of an integer value.
+ errModulusByZero = errors.New("modulus by zero")
+ // errIntOverflow is an error representing integer overflow.
+ errIntOverflow = errors.New("integer overflow")
+ // errUintOverflow is an error representing unsigned integer overflow.
+ errUintOverflow = errors.New("unsigned integer overflow")
+ // errDurationOverflow is an error representing duration overflow.
+ errDurationOverflow = errors.New("duration overflow")
+ // errTimestampOverflow is an error representing timestamp overflow.
+ errTimestampOverflow = errors.New("timestamp overflow")
+ celErrTimestampOverflow = &Err{error: errTimestampOverflow}
+
+ // celErrNoSuchOverload indicates that the call arguments did not match a supported method signature.
+ celErrNoSuchOverload = NewErr("no such overload")
+)
+
+// NewErr creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErr(format string, args ...any) ref.Val {
+ return &Err{error: fmt.Errorf(format, args...)}
+}
+
+// NewErrWithNodeID creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErrWithNodeID(id int64, format string, args ...any) ref.Val {
+ return &Err{error: fmt.Errorf(format, args...), id: id}
+}
+
+// LabelErrNode returns val unaltered if it is not an Err or if the error already has a
+// non-zero AST node ID present. Otherwise the id is added to the error for
+// recovery with the Err.NodeID method.
+func LabelErrNode(id int64, val ref.Val) ref.Val {
+ if err, ok := val.(*Err); ok && err.id == 0 {
+ err.id = id
+ return err
+ }
+ return val
+}
+
+// NoSuchOverloadErr returns a new types.Err instance with a no such overload message.
+func NoSuchOverloadErr() ref.Val {
+ return celErrNoSuchOverload
+}
+
+// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion
+// message that indicates that the native value could not be converted to a CEL ref.Val.
+func UnsupportedRefValConversionErr(val any) ref.Val {
+ return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
+}
+
+// MaybeNoSuchOverloadErr returns the input ref.Val if it is an error or unknown value,
+// else a new no-such-overload error.
+func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
+ return ValOrErr(val, "no such overload")
+}
+
+// ValOrErr either returns the existing error or creates a new one.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
+ if val == nil || !IsUnknownOrError(val) {
+ return NewErr(format, args...)
+ }
+ return val
+}
+
+// WrapErr wraps an existing Go error value into a CEL Err value.
+func WrapErr(err error) ref.Val {
+ return &Err{error: err}
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, e.error
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
+ // Errors are not convertible to other representations.
+ return e
+}
+
+// Equal implements ref.Val.Equal.
+func (e *Err) Equal(other ref.Val) ref.Val {
+ // An error cannot be equal to any other value, so it returns itself.
+ return e
+}
+
+// String implements fmt.Stringer.
+func (e *Err) String() string {
+ return e.error.Error()
+}
+
+// Type implements ref.Val.Type.
+func (e *Err) Type() ref.Type {
+ return ErrType
+}
+
+// Value implements ref.Val.Value.
+func (e *Err) Value() any {
+ return e.error
+}
+
+// NodeID returns the AST node ID of the expression that returned the error.
+func (e *Err) NodeID() int64 {
+ return e.id
+}
+
+// Is implements errors.Is.
+func (e *Err) Is(target error) bool {
+ return e.error.Error() == target.Error()
+}
+
+// Unwrap implements errors.Unwrap.
+func (e *Err) Unwrap() error {
+ return e.error
+}
+
+// IsError returns whether the input ref.Val is an *Err value.
+func IsError(val ref.Val) bool {
+ switch val.(type) {
+ case *Err:
+ return true
+ default:
+ return false
+ }
+}
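+
+// Usage sketch (illustrative comment only): *Err interoperates with the
+// standard errors package via its Is and Unwrap methods.
+//
+//	v := types.NewErr("no such key: %q", "foo")
+//	types.IsError(v)  // true
+//	e := v.(*types.Err)
+//	e.Error()         // `no such key: "foo"`
+//	errors.Is(e, errors.New(`no such key: "foo"`)) // true: Is compares messages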
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/int.go b/hack/tools/vendor/github.com/google/cel-go/common/types/int.go
new file mode 100644
index 0000000000..0ae9507c39
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/int.go
@@ -0,0 +1,303 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Int type that implements ref.Val as well as comparison and math operators.
+type Int int64
+
+// Int constants used for comparison results.
+const (
+ // IntZero is the zero-value for Int
+ IntZero = Int(0)
+ IntOne = Int(1)
+ IntNegOne = Int(-1)
+)
+
+var (
+ // int32WrapperType reflected type for protobuf int32 wrapper type.
+ int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{})
+
+ // int64WrapperType reflected type for protobuf int64 wrapper type.
+ int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{})
+)
+
+// Add implements traits.Adder.Add.
+func (i Int) Add(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := addInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (i Int) Compare(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareIntDouble(i, ov)
+ case Int:
+ return compareInt(i, ov)
+ case Uint:
+ return compareIntUint(i, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Int, reflect.Int32:
+ // Enums are also mapped as int32 derivations.
+ // Note, the code doesn't convert to the enum value directly since this is not known, but
+ // the net effect with respect to proto-assignment is handled correctly by the reflection
+ // Convert method.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int8:
+ v, err := int64ToInt8Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int16:
+ v, err := int64ToInt16Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int64:
+ return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Int64(int64(i)))
+ case int32WrapperType:
+ // Convert the value to a wrapperspb.Int32Value, error on overflow.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return wrapperspb.Int32(v), nil
+ case int64WrapperType:
+ // Convert the value to a wrapperspb.Int64Value.
+ return wrapperspb.Int64(int64(i)), nil
+ case jsonValueType:
+ // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON
+ // decimal strings. Because CEL ints might come from the automatic widening of 32-bit
+ // values in protos, the JSON type is chosen dynamically based on the value.
+ //
+ // - Integers -2^53-1 < n < 2^53-1 are encoded as JSON numbers.
+ // - Integers outside this range are encoded as JSON strings.
+ //
+ // The integer to float range represents the largest interval where such a conversion
+ // can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON
+ // number as with protobuf. Those consuming JSON from a 64-bit source must be able to
+ // handle either a JSON number or a JSON decimal string. To handle these cases safely
+ // the string values must be explicitly converted to int() within a CEL expression;
+ // however, it is best to simply stay within the JSON number range when building JSON
+ // objects in CEL.
+ if i.isJSONSafe() {
+ return structpb.NewNumberValue(float64(i)), nil
+ }
+ // Proto3 to JSON conversion requires string-formatted int64 values
+ // since the conversion to floating point would result in truncation.
+ return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Int32:
+ // Convert the value to a wrapperspb.Int32Value, error on overflow.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Int64:
+ v := int64(i)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ iv := i.Value()
+ if reflect.TypeOf(iv).Implements(typeDesc) {
+ return iv, nil
+ }
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ return i
+ case UintType:
+ u, err := int64ToUint64Checked(int64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(u)
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", int64(i)))
+ case TimestampType:
+ // The maximum positive value that can be passed to time.Unix is math.MaxInt64 minus the number
+ // of seconds between year 1 and year 1970. See comments on unixToInternal.
+ if int64(i) < minUnixTime || int64(i) > maxUnixTime {
+ return celErrTimestampOverflow
+ }
+ return timestampOf(time.Unix(int64(i), 0).UTC())
+ case TypeType:
+ return IntType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Int) Divide(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := divideInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Equal implements ref.Val.Equal.
+func (i Int) Equal(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(compareIntDouble(i, ov) == 0)
+ case Int:
+ return Bool(i == ov)
+ case Uint:
+ return Bool(compareIntUint(i, ov) == 0)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the integer is equal to 0.
+func (i Int) IsZeroValue() bool {
+ return i == IntZero
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Int) Modulo(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := moduloInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Int) Multiply(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := multiplyInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Negate implements traits.Negater.Negate.
+func (i Int) Negate() ref.Val {
+ val, err := negateInt64Checked(int64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Int) Subtract(subtrahend ref.Val) ref.Val {
+ subtraInt, ok := subtrahend.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractInt64Checked(int64(i), int64(subtraInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Type implements ref.Val.Type.
+func (i Int) Type() ref.Type {
+ return IntType
+}
+
+// Value implements ref.Val.Value.
+func (i Int) Value() any {
+ return int64(i)
+}
+
+// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON.
+func (i Int) isJSONSafe() bool {
+ return i >= minIntJSON && i <= maxIntJSON
+}
+
+const (
+ // maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6.
+ maxIntJSON = 1<<53 - 1
+ // minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6.
+ minIntJSON = -maxIntJSON
+)
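+
+// In-package behavior sketch (illustrative comment only): the JSON conversion
+// picks number or string form based on the safe-integer bounds above.
+//
+//	Int(42).ConvertToNative(jsonValueType)      // structpb number 42
+//	Int(1 << 53).ConvertToNative(jsonValueType) // structpb string "9007199254740992"
+//	Int(42).ConvertToType(TimestampType)        // 1970-01-01T00:00:42Z UTC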
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/iterator.go b/hack/tools/vendor/github.com/google/cel-go/common/types/iterator.go
new file mode 100644
index 0000000000..98e9147b6e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/iterator.go
@@ -0,0 +1,55 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ // IteratorType singleton.
+ IteratorType = NewObjectType("iterator", traits.IteratorType)
+)
+
+// baseIterator is the basis for list, map, and object iterators.
+//
+// An iterator in and of itself should not be a valid value for comparison, but must implement the
+// `ref.Val` methods in order to be well-supported within instruction arguments processed by the
+// interpreter.
+type baseIterator struct{}
+
+func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion on iterators not supported")
+}
+
+func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (*baseIterator) Equal(other ref.Val) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (*baseIterator) Type() ref.Type {
+ return IteratorType
+}
+
+func (*baseIterator) Value() any {
+ return nil
+}
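+
+// Usage sketch (illustrative comment only): concrete iterators embed
+// baseIterator and supply HasNext/Next, as the list iterator in list.go does.
+// Given some traits.Lister value (hypothetical `lister`):
+//
+//	it := lister.Iterator()
+//	for it.HasNext() == types.True {
+//		elem := it.Next() // use elem
+//	}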
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/json_value.go b/hack/tools/vendor/github.com/google/cel-go/common/types/json_value.go
new file mode 100644
index 0000000000..13a4efe7ad
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/json_value.go
@@ -0,0 +1,29 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// JSON type constants representing the reflected types of protobuf JSON values.
+var (
+ jsonValueType = reflect.TypeOf(&structpb.Value{})
+ jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
+ jsonStructType = reflect.TypeOf(&structpb.Struct{})
+ jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/list.go b/hack/tools/vendor/github.com/google/cel-go/common/types/list.go
new file mode 100644
index 0000000000..ca47d39fec
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/list.go
@@ -0,0 +1,574 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicList returns a traits.Lister with heterogeneous elements.
+// value should be an array of "native" types, i.e. any type that
+// NativeToValue() can convert to a ref.Val.
+func NewDynamicList(adapter Adapter, value any) traits.Lister {
+ refValue := reflect.ValueOf(value)
+ return &baseList{
+ Adapter: adapter,
+ value: value,
+ size: refValue.Len(),
+ get: func(i int) any {
+ return refValue.Index(i).Interface()
+ },
+ }
+}
+
+// NewStringList returns a traits.Lister containing only strings.
+func NewStringList(adapter Adapter, elems []string) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewRefValList returns a traits.Lister with ref.Val elements.
+//
+// This type specialization is used with list literals within CEL expressions.
+func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewProtoList returns a traits.Lister based on a pb.List instance.
+func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: list,
+ size: list.Len(),
+ get: func(i int) any { return list.Get(i).Interface() },
+ }
+}
+
+// NewJSONList returns a traits.Lister based on a structpb.ListValue instance.
+func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister {
+ vals := l.GetValues()
+ return &baseList{
+ Adapter: adapter,
+ value: l,
+ size: len(vals),
+ get: func(i int) any { return vals[i] },
+ }
+}
+
+// NewMutableList creates a new mutable list whose internal state can be modified.
+func NewMutableList(adapter Adapter) traits.MutableLister {
+ var mutableValues []ref.Val
+ l := &mutableList{
+ baseList: &baseList{
+ Adapter: adapter,
+ value: mutableValues,
+ size: 0,
+ },
+ mutableValues: mutableValues,
+ }
+ l.get = func(i int) any {
+ return l.mutableValues[i]
+ }
+ return l
+}
+
+// baseList points to a list containing elements of any type.
+// The `value` is an array of native values, and `get` returns the element at a given index.
+// The `Adapter` enables native type to CEL type conversions.
+type baseList struct {
+ Adapter
+ value any
+
+ // size indicates the number of elements within the list.
+ // Since objects are immutable the size of a list is static.
+ size int
+
+ // get returns a value at the specified integer index.
+ // The index is guaranteed to be checked against the list index range.
+ get func(int) any
+}
+
+// Add implements the traits.Adder interface method.
+func (l *baseList) Add(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if otherList.Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ Adapter: l.Adapter,
+ prevList: l,
+ nextList: otherList}
+}
+
+// Contains implements the traits.Container interface method.
+func (l *baseList) Contains(elem ref.Val) ref.Val {
+ for i := 0; i < l.size; i++ {
+ val := l.NativeToValue(l.get(i))
+ cmp := elem.Equal(val)
+ b, ok := cmp.(Bool)
+ if ok && b == True {
+ return True
+ }
+ }
+ return False
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the underlying list value is assignable to the reflected type return it.
+ if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
+ return l.value, nil
+ }
+ // If the list wrapper is assignable to the desired type return it.
+ if reflect.TypeOf(l).AssignableTo(typeDesc) {
+ return l, nil
+ }
+ // Attempt to convert the list to a set of well known protobuf types.
+ switch typeDesc {
+ case anyValueType:
+ json, err := l.ConvertToNative(jsonListValueType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonListValueType:
+ jsonValues, err :=
+ l.ConvertToNative(reflect.TypeOf([]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)}
+ if typeDesc == jsonListValueType {
+ return jsonList, nil
+ }
+ return structpb.NewListValue(jsonList), nil
+ }
+ // Non-list conversion.
+ if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array {
+ return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
+ }
+
+ // List conversion.
+ // Allow the element ConvertToNative() function to determine whether conversion is possible.
+ otherElemType := typeDesc.Elem()
+ elemCount := l.size
+ var nativeList reflect.Value
+ if typeDesc.Kind() == reflect.Array {
+ nativeList = reflect.New(reflect.ArrayOf(elemCount, typeDesc)).Elem().Index(0)
+ } else {
+ nativeList = reflect.MakeSlice(typeDesc, elemCount, elemCount)
+ }
+ for i := 0; i < elemCount; i++ {
+ elem := l.NativeToValue(l.get(i))
+ nativeElemVal, err := elem.ConvertToNative(otherElemType)
+ if err != nil {
+ return nil, err
+ }
+ nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
+ }
+ return nativeList.Interface(), nil
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (l *baseList) Equal(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return False
+ }
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := Equal(thisElem, otherElem)
+ if elemEq == False {
+ return False
+ }
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (l *baseList) Get(index ref.Val) ref.Val {
+ ind, err := IndexOrError(index)
+ if err != nil {
+ return ValOrErr(index, err.Error())
+ }
+ if ind < 0 || ind >= l.size {
+ return NewErr("index '%d' out of range in list size '%d'", ind, l.Size())
+ }
+ return l.NativeToValue(l.get(ind))
+}
+
+// IsZeroValue returns true if the list is empty.
+func (l *baseList) IsZeroValue() bool {
+ return l.size == 0
+}
+
+// Fold calls the FoldEntry method for each (index, value) pair in the list.
+func (l *baseList) Fold(f traits.Folder) {
+ for i := 0; i < l.size; i++ {
+ if !f.FoldEntry(i, l.get(i)) {
+ break
+ }
+ }
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (l *baseList) Iterator() traits.Iterator {
+ return newListIterator(l)
+}
+
+// Size implements the traits.Sizer interface method.
+func (l *baseList) Size() ref.Val {
+ return Int(l.size)
+}
+
+// Type implements the ref.Val interface method.
+func (l *baseList) Type() ref.Type {
+ return ListType
+}
+
+// Value implements the ref.Val interface method.
+func (l *baseList) Value() any {
+ return l.value
+}
+
+// String converts the list to a human readable string form.
+func (l *baseList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := 0; i < l.size; i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.get(i)))
+ if i != l.size-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// mutableList aggregates values into its internal storage. For use with internal CEL variables only.
+type mutableList struct {
+ *baseList
+ mutableValues []ref.Val
+}
+
+// Add copies elements from the other list into the internal storage of the mutable list.
+// The ref.Val returned by Add is the receiver.
+func (l *mutableList) Add(other ref.Val) ref.Val {
+ switch otherList := other.(type) {
+ case *mutableList:
+ l.mutableValues = append(l.mutableValues, otherList.mutableValues...)
+ l.size += len(otherList.mutableValues)
+ case traits.Lister:
+ for i := IntZero; i < otherList.Size().(Int); i++ {
+ l.size++
+ l.mutableValues = append(l.mutableValues, otherList.Get(i))
+ }
+ default:
+ return MaybeNoSuchOverloadErr(otherList)
+ }
+ return l
+}
+
+// ToImmutableList returns an immutable list based on the internal storage of the mutable list.
+func (l *mutableList) ToImmutableList() traits.Lister {
+ // The reference to internal state is guaranteed to be safe as this call is only performed
+ // when mutations have been completed.
+ return NewRefValList(l.Adapter, l.mutableValues)
+}
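+
+// Usage sketch (illustrative comment only, using this package's
+// DefaultTypeAdapter defined elsewhere in the package):
+//
+//	l := types.NewMutableList(types.DefaultTypeAdapter)
+//	l.Add(types.NewStringList(types.DefaultTypeAdapter, []string{"a", "b"}))
+//	imm := l.ToImmutableList()
+//	imm.Size() // types.Int(2)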
+
+// concatList combines two list implementations together into a view.
+// The `Adapter` enables native type to CEL type conversions.
+type concatList struct {
+ Adapter
+ value any
+ prevList traits.Lister
+ nextList traits.Lister
+}
+
+// Add implements the traits.Adder interface method.
+func (l *concatList) Add(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if otherList.Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ Adapter: l.Adapter,
+ prevList: l,
+ nextList: otherList}
+}
+
+// Contains implements the traits.Container interface method.
+func (l *concatList) Contains(elem ref.Val) ref.Val {
+ // The concat list relies on the IsErrorOrUnknown checks against the input element to be
+ // performed by the `prevList` and/or `nextList`.
+ prev := l.prevList.Contains(elem)
+ // Short-circuit the return if the elem was found in the prev list.
+ if prev == True {
+ return prev
+ }
+ // Return if the elem was found in the next list.
+ next := l.nextList.Contains(elem)
+ if next == True {
+ return next
+ }
+ // Handle the case where an error or unknown was encountered before checking next.
+ if IsUnknownOrError(prev) {
+ return prev
+ }
+ // Otherwise, rely on the next value as the representative result.
+ return next
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ combined := NewDynamicList(l.Adapter, l.Value().([]any))
+ return combined.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (l *concatList) Equal(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return False
+ }
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ var maybeErr ref.Val
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := Equal(thisElem, otherElem)
+ if elemEq == False {
+ return False
+ }
+ if maybeErr == nil && IsUnknownOrError(elemEq) {
+ maybeErr = elemEq
+ }
+ }
+ if maybeErr != nil {
+ return maybeErr
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (l *concatList) Get(index ref.Val) ref.Val {
+ ind, err := IndexOrError(index)
+ if err != nil {
+ return ValOrErr(index, err.Error())
+ }
+ i := Int(ind)
+ if i < l.prevList.Size().(Int) {
+ return l.prevList.Get(i)
+ }
+ offset := i - l.prevList.Size().(Int)
+ return l.nextList.Get(offset)
+}
+
+// IsZeroValue returns true if the list is empty.
+func (l *concatList) IsZeroValue() bool {
+ return l.Size().(Int) == 0
+}
+
+// Fold calls the FoldEntry method for each (index, value) pair in the list.
+func (l *concatList) Fold(f traits.Folder) {
+ for i := Int(0); i < l.Size().(Int); i++ {
+ if !f.FoldEntry(i, l.Get(i)) {
+ break
+ }
+ }
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (l *concatList) Iterator() traits.Iterator {
+ return newListIterator(l)
+}
+
+// Size implements the traits.Sizer interface method.
+func (l *concatList) Size() ref.Val {
+ return l.prevList.Size().(Int).Add(l.nextList.Size())
+}
+
+// String converts the concatenated list to a human-readable string.
+func (l *concatList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := Int(0); i < l.Size().(Int); i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.Get(i)))
+ if i != l.Size().(Int)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// Type implements the ref.Val interface method.
+func (l *concatList) Type() ref.Type {
+ return ListType
+}
+
+// Value implements the ref.Val interface method.
+func (l *concatList) Value() any {
+ if l.value == nil {
+ merged := make([]any, l.Size().(Int))
+ prevLen := l.prevList.Size().(Int)
+ for i := Int(0); i < prevLen; i++ {
+ merged[i] = l.prevList.Get(i).Value()
+ }
+ nextLen := l.nextList.Size().(Int)
+ for j := Int(0); j < nextLen; j++ {
+ merged[prevLen+j] = l.nextList.Get(j).Value()
+ }
+ l.value = merged
+ }
+ return l.value
+}
+
+func newListIterator(listValue traits.Lister) traits.Iterator {
+ return &listIterator{
+ listValue: listValue,
+ len: listValue.Size().(Int),
+ }
+}
+
+type listIterator struct {
+ *baseIterator
+ listValue traits.Lister
+ cursor Int
+ len Int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *listIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *listIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return it.listValue.Get(index)
+ }
+ return nil
+}
+
+// IndexOrError converts an input index value into either a lossless integer index or an error.
+func IndexOrError(index ref.Val) (int, error) {
+ switch iv := index.(type) {
+ case Int:
+ return int(iv), nil
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(iv)); ok {
+ return int(ik), nil
+ }
+ return -1, fmt.Errorf("unsupported index value %v in list", index)
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(iv)); ok {
+ return int(ik), nil
+ }
+ return -1, fmt.Errorf("unsupported index value %v in list", index)
+ default:
+ return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type())
+ }
+}
+
+// ToFoldableList will create a Foldable version of a list suitable for (index, value) pair iteration.
+//
+// For values which are already Foldable, this call is a no-op. For all other values, the fold is
+// driven via the Size() and Get() calls which means that the folding will function, but take a
+// performance hit.
+func ToFoldableList(l traits.Lister) traits.Foldable {
+ if f, ok := l.(traits.Foldable); ok {
+ return f
+ }
+ return interopFoldableList{Lister: l}
+}
+
+type interopFoldableList struct {
+ traits.Lister
+}
+
+// Fold implements the traits.Foldable interface method and performs an iteration over the
+// range of elements of the list.
+func (l interopFoldableList) Fold(f traits.Folder) {
+ sz := l.Size().(Int)
+ for i := Int(0); i < sz; i++ {
+ if !f.FoldEntry(i, l.Get(i)) {
+ break
+ }
+ }
+}
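+
+// Usage sketch (illustrative comment only): Add on two non-empty lists builds
+// a lazy concatList view; indexing and membership span both halves.
+//
+//	a := types.NewStringList(types.DefaultTypeAdapter, []string{"x"})
+//	b := types.NewStringList(types.DefaultTypeAdapter, []string{"y", "z"})
+//	c := a.Add(b).(traits.Lister)
+//	c.Size()                      // types.Int(3)
+//	c.Get(types.Int(2))           // types.String("z"), served by the next list
+//	c.Contains(types.String("y")) // types.True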
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/map.go b/hack/tools/vendor/github.com/google/cel-go/common/types/map.go
new file mode 100644
index 0000000000..cb6cce78b0
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/map.go
@@ -0,0 +1,1002 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/stoewer/go-strcase"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
+func NewDynamicMap(adapter Adapter, value any) traits.Mapper {
+ refValue := reflect.ValueOf(value)
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newReflectMapAccessor(adapter, refValue),
+ value: value,
+ size: refValue.Len(),
+ }
+}
+
+// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been
+// encoded in protocol buffer form.
+//
+// The `adapter` argument provides type adaptation capabilities from proto to CEL.
+func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper {
+ fields := value.GetFields()
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newJSONStructAccessor(adapter, fields),
+ value: value,
+ size: len(fields),
+ }
+}
+
+// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values.
+func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newRefValMapAccessor(value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
+func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newStringIfaceMapAccessor(adapter, value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
+func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newStringMapAccessor(value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values.
+func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper {
+ return &protoMap{
+ Adapter: adapter,
+ value: value,
+ }
+}
+
+// NewMutableMap constructs a mutable map from an adapter and a set of map values.
+func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper {
+ mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues))
+ for k, v := range mutableValues {
+ mutableCopy[k] = v
+ }
+ m := &mutableMap{
+ baseMap: &baseMap{
+ Adapter: adapter,
+ mapAccessor: newRefValMapAccessor(mutableCopy),
+ value: mutableCopy,
+ size: len(mutableCopy),
+ },
+ mutableValues: mutableCopy,
+ }
+ return m
+}
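+
+// Usage sketch (illustrative comment only): the specialized constructors all
+// share the immutable baseMap behavior defined below.
+//
+//	m := types.NewStringStringMap(types.DefaultTypeAdapter, map[string]string{"env": "prod"})
+//	m.Contains(types.String("env")) // types.True
+//	m.Get(types.String("env"))      // types.String("prod")
+//	m.Size()                        // types.Int(1)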
+
+// mapAccessor is a private interface for finding values within a map and iterating over the keys.
+// This interface implements portions of the API surface area required by the traits.Mapper
+// interface.
+type mapAccessor interface {
+ // Find returns a value, if one exists, for the input key.
+ //
+ // If the key is not found the function returns (nil, false).
+ Find(ref.Val) (ref.Val, bool)
+
+ // Iterator returns an Iterator over the map key set.
+ Iterator() traits.Iterator
+
+ // Fold calls the FoldEntry method for each (key, value) pair in the map.
+ Fold(traits.Folder)
+}
+
+// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
+//
+// Since CEL is side-effect free, the base map represents an immutable object.
+type baseMap struct {
+ // TypeAdapter used to convert keys and values accessed within the map.
+ Adapter
+
+ // mapAccessor interface implementation used to find and iterate over map keys.
+ mapAccessor
+
+ // value is the native Go value upon which the map type operates.
+ value any
+
+ // size is the number of entries in the map.
+ size int
+}
+
+// Contains implements the traits.Container interface method.
+func (m *baseMap) Contains(index ref.Val) ref.Val {
+ _, found := m.Find(index)
+ return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the map is already assignable to the desired type, return it, e.g. interfaces and
+ // maps with the same key and value types.
+ if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
+ return m.value, nil
+ }
+ if reflect.TypeOf(m).AssignableTo(typeDesc) {
+ return m, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ json, err := m.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonStructType:
+ jsonEntries, err :=
+ m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)}
+ if typeDesc == jsonStructType {
+ return jsonMap, nil
+ }
+ return structpb.NewStructValue(jsonMap), nil
+ }
+
+ // Unwrap pointers, but track their use.
+ isPtr := false
+ if typeDesc.Kind() == reflect.Ptr {
+ tk := typeDesc
+ typeDesc = typeDesc.Elem()
+ if typeDesc.Kind() == reflect.Ptr {
+ return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
+ }
+ isPtr = true
+ }
+ switch typeDesc.Kind() {
+ // Map conversion.
+ case reflect.Map:
+ otherKey := typeDesc.Key()
+ otherElem := typeDesc.Elem()
+ nativeMap := reflect.MakeMapWithSize(typeDesc, m.size)
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ refKeyValue, err := key.ConvertToNative(otherKey)
+ if err != nil {
+ return nil, err
+ }
+ refElemValue, err := m.Get(key).ConvertToNative(otherElem)
+ if err != nil {
+ return nil, err
+ }
+ nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue))
+ }
+ return nativeMap.Interface(), nil
+ case reflect.Struct:
+ nativeStructPtr := reflect.New(typeDesc)
+ nativeStruct := nativeStructPtr.Elem()
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ // Ensure the field name being referenced is exported.
+ // Only exported (public) field names can be set by reflection, where the name
+ // must be at least one character in length and start with an upper-case letter.
+ fieldName := key.ConvertToType(StringType)
+ if IsError(fieldName) {
+ return nil, fieldName.(*Err)
+ }
+ name := string(fieldName.(String))
+ name = strcase.UpperCamelCase(name)
+ fieldRef := nativeStruct.FieldByName(name)
+ if !fieldRef.IsValid() {
+ return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc)
+ }
+ fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type())
+ if err != nil {
+ return nil, err
+ }
+ fieldRef.Set(reflect.ValueOf(fieldValue))
+ }
+ if isPtr {
+ return nativeStructPtr.Interface(), nil
+ }
+ return nativeStruct.Interface(), nil
+ }
+ return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc)
+}
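
As a hedged illustration of the reflect.Struct branch above: map keys are upper-camel-cased into exported field names before assignment. The point type here is hypothetical, and in-package usage is assumed:

    type point struct{ X, Y int64 }
    m := NewStringInterfaceMap(DefaultTypeAdapter, map[string]any{"x": int64(1), "y": int64(2)})
    out, err := m.ConvertToNative(reflect.TypeOf(point{}))
    if err == nil {
        _ = out.(point) // point{X: 1, Y: 2}; key "x" resolves to the exported field "X"
    }
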
+
+// ConvertToType implements the ref.Val interface method.
+func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *baseMap) Equal(other ref.Val) ref.Val {
+ otherMap, ok := other.(traits.Mapper)
+ if !ok {
+ return False
+ }
+ if m.Size() != otherMap.Size() {
+ return False
+ }
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ thisVal, _ := m.Find(key)
+ otherVal, found := otherMap.Find(key)
+ if !found {
+ return False
+ }
+ valEq := Equal(thisVal, otherVal)
+ if valEq == False {
+ return False
+ }
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (m *baseMap) Get(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if !found {
+ return ValOrErr(v, "no such key: %v", key)
+ }
+ return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *baseMap) IsZeroValue() bool {
+ return m.size == 0
+}
+
+// Size implements the traits.Sizer interface method.
+func (m *baseMap) Size() ref.Val {
+ return Int(m.size)
+}
+
+// String converts the map into a human-readable string.
+func (m *baseMap) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ it := m.Iterator()
+ i := 0
+ for it.HasNext() == True {
+ k := it.Next()
+ v, _ := m.Find(k)
+ sb.WriteString(fmt.Sprintf("%v: %v", k, v))
+ if i != m.size-1 {
+ sb.WriteString(", ")
+ }
+ i++
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
+// Type implements the ref.Val interface method.
+func (m *baseMap) Type() ref.Type {
+ return MapType
+}
+
+// Value implements the ref.Val interface method.
+func (m *baseMap) Value() any {
+ return m.value
+}
+
+// mutableMap holds onto a set of mutable values which are used for intermediate computations.
+type mutableMap struct {
+ *baseMap
+ mutableValues map[ref.Val]ref.Val
+}
+
+// Insert implements the traits.MutableMapper interface method, returning true if the key insertion
+// succeeds.
+func (m *mutableMap) Insert(k, v ref.Val) ref.Val {
+ if _, found := m.Find(k); found {
+ return NewErr("insert failed: key %v already exists", k)
+ }
+ m.mutableValues[k] = v
+ return m
+}
+
+// ToImmutableMap implements the traits.MutableMapper interface method, converting a mutable map
+// into an immutable map implementation.
+func (m *mutableMap) ToImmutableMap() traits.Mapper {
+ return NewRefValMap(m.Adapter, m.mutableValues)
+}
+
+func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor {
+ return &jsonStructAccessor{
+ Adapter: adapter,
+ st: st,
+ }
+}
+
+type jsonStructAccessor struct {
+ Adapter
+ st map[string]*structpb.Value
+}
+
+// Find searches the json struct field map for the input key value and returns (value, true) if
+// found.
+//
+// If the key is not found the function returns (nil, false).
+func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.st[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return a.NativeToValue(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the set of JSON struct field names.
+func (a *jsonStructAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.st))
+ i := 0
+ for k := range a.st {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *jsonStructAccessor) Fold(f traits.Folder) {
+ for k, v := range a.st {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor {
+ keyType := value.Type().Key()
+ return &reflectMapAccessor{
+ Adapter: adapter,
+ refValue: value,
+ keyType: keyType,
+ }
+}
+
+type reflectMapAccessor struct {
+ Adapter
+ refValue reflect.Value
+ keyType reflect.Type
+}
+
+// Find converts the input key to a native Golang type and then uses reflection to find the key,
+// returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (m *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ if m.refValue.Len() == 0 {
+ return nil, false
+ }
+ if keyVal, found := m.findInternal(key); found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ // Double is not a valid proto map key type, so check for the key as an int or uint.
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := m.findInternal(Int(ik)); found {
+ return keyVal, true
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ return m.findInternal(Int(ik))
+ }
+ }
+ return nil, false
+}
+
+// findInternal attempts to convert the incoming key to the map's internal native type
+// and then returns the value, if found.
+func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) {
+ k, err := key.ConvertToNative(m.keyType)
+ if err != nil {
+ return nil, false
+ }
+ refKey := reflect.ValueOf(k)
+ val := m.refValue.MapIndex(refKey)
+ if val.IsValid() {
+ return m.NativeToValue(val.Interface()), true
+ }
+ return nil, false
+}
+
+// Iterator creates a Golang reflection-based traits.Iterator.
+func (m *reflectMapAccessor) Iterator() traits.Iterator {
+ return &mapIterator{
+ Adapter: m.Adapter,
+ mapKeys: m.refValue.MapRange(),
+ len: m.refValue.Len(),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (m *reflectMapAccessor) Fold(f traits.Folder) {
+ mapRange := m.refValue.MapRange()
+ for mapRange.Next() {
+ if !f.FoldEntry(mapRange.Key().Interface(), mapRange.Value().Interface()) {
+ break
+ }
+ }
+}
+
+func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor {
+ return &refValMapAccessor{mapVal: mapVal}
+}
+
+type refValMapAccessor struct {
+ mapVal map[ref.Val]ref.Val
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ if len(a.mapVal) == 0 {
+ return nil, false
+ }
+ if keyVal, found := a.mapVal[key]; found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := a.mapVal[Int(ik)]; found {
+ return keyVal, found
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ keyVal, found := a.mapVal[Uint(uk)]
+ return keyVal, found
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ keyVal, found := a.mapVal[Uint(uk)]
+ return keyVal, found
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ keyVal, found := a.mapVal[Int(ik)]
+ return keyVal, found
+ }
+ }
+ return nil, false
+}
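
A short sketch of the numeric key coercion above: a double key that converts losslessly to an int matches an Int-keyed entry (in-package usage assumed):

    m := NewRefValMap(DefaultTypeAdapter, map[ref.Val]ref.Val{Int(42): String("answer")})
    if v, found := m.Find(Double(42.0)); found {
        _ = v // String("answer"): 42.0 converts losslessly to Int(42)
    }
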
+
+// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection.
+func (a *refValMapAccessor) Iterator() traits.Iterator {
+ return &mapIterator{
+ Adapter: DefaultTypeAdapter,
+ mapKeys: reflect.ValueOf(a.mapVal).MapRange(),
+ len: len(a.mapVal),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *refValMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+func newStringMapAccessor(strMap map[string]string) mapAccessor {
+ return &stringMapAccessor{mapVal: strMap}
+}
+
+type stringMapAccessor struct {
+ mapVal map[string]string
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.mapVal[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return String(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the string key set of the map.
+func (a *stringMapAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.mapVal))
+ i := 0
+ for k := range a.mapVal {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *stringMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor {
+ return &stringIfaceMapAccessor{
+ Adapter: adapter,
+ mapVal: mapVal,
+ }
+}
+
+type stringIfaceMapAccessor struct {
+ Adapter
+ mapVal map[string]any
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.mapVal[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return a.NativeToValue(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the string key set of the map.
+func (a *stringIfaceMapAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.mapVal))
+ i := 0
+ for k := range a.mapVal {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *stringIfaceMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to
+// accessing protoreflect.Map values.
+type protoMap struct {
+ Adapter
+ value *pb.Map
+}
+
+// Contains returns whether the map contains the given key.
+func (m *protoMap) Contains(key ref.Val) ref.Val {
+ _, found := m.Find(key)
+ return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+//
+// Note, assignment to Golang struct types is not yet supported.
+func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the map is already assignable to the desired type, return it, e.g. interfaces and
+ // maps with the same key and value types.
+ switch typeDesc {
+ case anyValueType:
+ json, err := m.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonStructType:
+ jsonEntries, err :=
+ m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonMap := &structpb.Struct{
+ Fields: jsonEntries.(map[string]*structpb.Value)}
+ if typeDesc == jsonStructType {
+ return jsonMap, nil
+ }
+ return structpb.NewStructValue(jsonMap), nil
+ }
+ switch typeDesc.Kind() {
+ case reflect.Struct, reflect.Ptr:
+ if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
+ return m.value, nil
+ }
+ if reflect.TypeOf(m).AssignableTo(typeDesc) {
+ return m, nil
+ }
+ }
+ if typeDesc.Kind() != reflect.Map {
+ return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc)
+ }
+
+ keyType := m.value.KeyType.ReflectType()
+ valType := m.value.ValueType.ReflectType()
+ otherKeyType := typeDesc.Key()
+ otherValType := typeDesc.Elem()
+ mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len())
+ var err error
+ m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
+ ntvKey := key.Interface()
+ ntvVal := val.Interface()
+ switch pv := ntvVal.(type) {
+ case protoreflect.Message:
+ ntvVal = pv.Interface()
+ }
+ if keyType == otherKeyType && valType == otherValType {
+ mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
+ return true
+ }
+ celKey := m.NativeToValue(ntvKey)
+ celVal := m.NativeToValue(ntvVal)
+ ntvKey, err = celKey.ConvertToNative(otherKeyType)
+ if err != nil {
+ // early terminate the range loop.
+ return false
+ }
+ ntvVal, err = celVal.ConvertToNative(otherValType)
+ if err != nil {
+ // early terminate the range loop.
+ return false
+ }
+ mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return mapVal.Interface(), nil
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *protoMap) Equal(other ref.Val) ref.Val {
+ otherMap, ok := other.(traits.Mapper)
+ if !ok {
+ return False
+ }
+ if m.value.Map.Len() != int(otherMap.Size().(Int)) {
+ return False
+ }
+ var retVal ref.Val = True
+ m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
+ keyVal := m.NativeToValue(key.Interface())
+ valVal := m.NativeToValue(val)
+ otherVal, found := otherMap.Find(keyVal)
+ if !found {
+ retVal = False
+ return false
+ }
+ valEq := Equal(valVal, otherVal)
+ if valEq != True {
+ retVal = valEq
+ return false
+ }
+ return true
+ })
+ return retVal
+}
+
+// Find searches the protoreflect.Map for the input key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (m *protoMap) Find(key ref.Val) (ref.Val, bool) {
+ if keyVal, found := m.findInternal(key); found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ // Double is not a valid proto map key type, so check for the key as an int or uint.
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := m.findInternal(Int(ik)); found {
+ return keyVal, true
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ return m.findInternal(Int(ik))
+ }
+ }
+ return nil, false
+}
+
+// findInternal attempts to convert the incoming key to the map's internal native type
+// and then returns the value, if found.
+func (m *protoMap) findInternal(key ref.Val) (ref.Val, bool) {
+ // Convert the input key to the expected protobuf key type.
+ ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType())
+ if err != nil {
+ return nil, false
+ }
+ // Use protoreflection to get the key value.
+ val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey())
+ if !val.IsValid() {
+ return nil, false
+ }
+ // Perform nominal type unwrapping from the input value.
+ switch v := val.Interface().(type) {
+ case protoreflect.List, protoreflect.Map:
+ // Maps do not support list or map values
+ return nil, false
+ default:
+ return m.NativeToValue(v), true
+ }
+}
+
+// Get implements the traits.Indexer interface method.
+func (m *protoMap) Get(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if !found {
+ return ValOrErr(v, "no such key: %v", key)
+ }
+ return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *protoMap) IsZeroValue() bool {
+ return m.value.Len() == 0
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (m *protoMap) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]protoreflect.MapKey, 0, m.value.Len())
+ m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ mapKeys = append(mapKeys, k)
+ return true
+ })
+ return &protoMapIterator{
+ Adapter: m.Adapter,
+ mapKeys: mapKeys,
+ len: m.value.Len(),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (m *protoMap) Fold(f traits.Folder) {
+ m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ return f.FoldEntry(k.Interface(), v.Interface())
+ })
+}
+
+// Size returns the number of entries in the protoreflect.Map.
+func (m *protoMap) Size() ref.Val {
+ return Int(m.value.Len())
+}
+
+// Type implements the ref.Val interface method.
+func (m *protoMap) Type() ref.Type {
+ return MapType
+}
+
+// Value implements the ref.Val interface method.
+func (m *protoMap) Value() any {
+ return m.value
+}
+
+type mapIterator struct {
+ *baseIterator
+ Adapter
+ mapKeys *reflect.MapIter
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *mapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *mapIterator) Next() ref.Val {
+ if it.HasNext() == True && it.mapKeys.Next() {
+ it.cursor++
+ refKey := it.mapKeys.Key()
+ return it.NativeToValue(refKey.Interface())
+ }
+ return nil
+}
+
+type protoMapIterator struct {
+ *baseIterator
+ Adapter
+ mapKeys []protoreflect.MapKey
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *protoMapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *protoMapIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ refKey := it.mapKeys[index]
+ return it.NativeToValue(refKey.Interface())
+ }
+ return nil
+}
+
+type stringKeyIterator struct {
+ *baseIterator
+ mapKeys []string
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *stringKeyIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *stringKeyIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return String(it.mapKeys[index])
+ }
+ return nil
+}
+
+// ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration.
+//
+// For values which are already Foldable, this call is a no-op. For all other values, the fold
+// is driven via the Iterator HasNext() and Next() calls as well as the map's Get() method,
+// which means the folding will work but incur a performance hit.
+func ToFoldableMap(m traits.Mapper) traits.Foldable {
+ if f, ok := m.(traits.Foldable); ok {
+ return f
+ }
+ return interopFoldableMap{Mapper: m}
+}
+
+type interopFoldableMap struct {
+ traits.Mapper
+}
+
+func (m interopFoldableMap) Fold(f traits.Folder) {
+ it := m.Iterator()
+ for it.HasNext() == True {
+ k := it.Next()
+ if !f.FoldEntry(k, m.Get(k)) {
+ break
+ }
+ }
+}
+
+// InsertMapKeyValue inserts a key, value pair into the target map if the target map does not
+// already contain the given key.
+//
+// If the map is mutable, it is modified in-place per the MutableMapper contract.
+// If the map is not mutable, a copy containing the new key, value pair is made.
+func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val {
+ if mutable, ok := m.(traits.MutableMapper); ok {
+ return mutable.Insert(k, v)
+ }
+
+ // Otherwise perform the slow version of the insertion which makes a copy of the incoming map.
+ if _, found := m.Find(k); !found {
+ size := m.Size().(Int)
+ copy := make(map[ref.Val]ref.Val, size+1)
+ copy[k] = v
+ it := m.Iterator()
+ for it.HasNext() == True {
+ nextK := it.Next()
+ nextV := m.Get(nextK)
+ copy[nextK] = nextV
+ }
+ return DefaultTypeAdapter.NativeToValue(copy)
+ }
+ return NewErr("insert failed: key %v already exists", k)
+}
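
A sketch of the copy-on-insert path above for maps that do not implement traits.MutableMapper (in-package usage assumed):

    base := NewRefValMap(DefaultTypeAdapter, map[ref.Val]ref.Val{String("a"): Int(1)})
    out := InsertMapKeyValue(base, String("b"), Int(2))
    _ = out // a fresh map value holding both "a" and "b"; base is unchanged
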
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/null.go b/hack/tools/vendor/github.com/google/cel-go/common/types/null.go
new file mode 100644
index 0000000000..36514ff200
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/null.go
@@ -0,0 +1,119 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// Null type implementation.
+type Null structpb.NullValue
+
+var (
+ // NullValue singleton.
+ NullValue = Null(structpb.NullValue_NULL_VALUE)
+
+ // golang reflect type for Null values.
+ nullReflectType = reflect.TypeOf(NullValue)
+
+ protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Int32:
+ switch typeDesc {
+ case jsonNullType:
+ return structpb.NullValue_NULL_VALUE, nil
+ case nullReflectType:
+ return n, nil
+ }
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Convert to a JSON-null before packing to an Any field since the enum value for JSON
+ // null cannot be packed directly.
+ pb, err := n.ConvertToNative(jsonValueType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(pb.(proto.Message))
+ case jsonValueType:
+ return structpb.NewNullValue(), nil
+ case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType,
+ int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType,
+ uint64WrapperType, durationValueType, timestampValueType, protoIfaceType:
+ return nil, nil
+ case jsonListValueType, jsonStructType:
+ // skip handling
+ default:
+ if typeDesc.Implements(protoIfaceType) {
+ return nil, nil
+ }
+ }
+ case reflect.Interface:
+ nv := n.Value()
+ if reflect.TypeOf(nv).Implements(typeDesc) {
+ return nv, nil
+ }
+ if reflect.TypeOf(n).Implements(typeDesc) {
+ return n, nil
+ }
+ }
+ // If the type conversion isn't supported return an error.
+ return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (n Null) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String("null")
+ case NullType:
+ return n
+ case TypeType:
+ return NullType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (n Null) Equal(other ref.Val) ref.Val {
+ return Bool(NullType == other.Type())
+}
+
+// IsZeroValue returns true as null always represents an absent value.
+func (n Null) IsZeroValue() bool {
+ return true
+}
+
+// Type implements ref.Val.Type.
+func (n Null) Type() ref.Type {
+ return NullType
+}
+
+// Value implements ref.Val.Value.
+func (n Null) Value() any {
+ return structpb.NullValue_NULL_VALUE
+}
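
A hedged sketch of the Null semantics defined above (in-package usage assumed):

    _ = NullValue.ConvertToType(StringType) // String("null")
    _ = NullValue.Equal(NullValue)          // True: equality is purely type-based
    _ = NullValue.Equal(Int(0))             // False
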
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/object.go b/hack/tools/vendor/github.com/google/cel-go/common/types/object.go
new file mode 100644
index 0000000000..8ba0af9fbe
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/object.go
@@ -0,0 +1,165 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+type protoObj struct {
+ Adapter
+ value proto.Message
+ typeDesc *pb.TypeDescription
+ typeValue ref.Val
+}
+
+// NewObject returns an object based on a proto.Message value which handles
+// conversion between protobuf type values and expression type values.
+// Objects support indexing and iteration.
+//
+// Note: the type value is pulled from the list of registered types within the
+// type provider. If the proto type is not registered within the type provider,
+// then this will result in an error within the type adapter / provider.
+func NewObject(adapter Adapter,
+ typeDesc *pb.TypeDescription,
+ typeValue ref.Val,
+ value proto.Message) ref.Val {
+ return &protoObj{
+ Adapter: adapter,
+ value: value,
+ typeDesc: typeDesc,
+ typeValue: typeValue}
+}
+
+func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ srcPB := o.value
+ if reflect.TypeOf(srcPB).AssignableTo(typeDesc) {
+ return srcPB, nil
+ }
+ if reflect.TypeOf(o).AssignableTo(typeDesc) {
+ return o, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ _, isAny := srcPB.(*anypb.Any)
+ if isAny {
+ return srcPB, nil
+ }
+ return anypb.New(srcPB)
+ case jsonValueType:
+ // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no
+ // support for direct conversion from proto.Message to protobuf.Value.
+ bytes, err := protojson.Marshal(srcPB)
+ if err != nil {
+ return nil, err
+ }
+ json := &structpb.Value{}
+ err = protojson.Unmarshal(bytes, json)
+ if err != nil {
+ return nil, err
+ }
+ return json, nil
+ default:
+ if typeDesc == o.typeDesc.ReflectType() {
+ return o.value, nil
+ }
+ if typeDesc.Kind() == reflect.Ptr {
+ val := reflect.New(typeDesc.Elem()).Interface()
+ dstPB, ok := val.(proto.Message)
+ if ok {
+ err := pb.Merge(dstPB, srcPB)
+ if err != nil {
+ return nil, fmt.Errorf("type conversion error: %v", err)
+ }
+ return dstPB, nil
+ }
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc)
+}
+
+func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ default:
+ if o.Type().TypeName() == typeVal.TypeName() {
+ return o
+ }
+ case TypeType:
+ return o.typeValue
+ }
+ return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal)
+}
+
+func (o *protoObj) Equal(other ref.Val) ref.Val {
+ otherPB, ok := other.Value().(proto.Message)
+ return Bool(ok && pb.Equal(o.value, otherPB))
+}
+
+// IsSet tests whether a defined field is set to a non-default value.
+func (o *protoObj) IsSet(field ref.Val) ref.Val {
+ protoFieldName, ok := field.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(field)
+ }
+ protoFieldStr := string(protoFieldName)
+ fd, found := o.typeDesc.FieldByName(protoFieldStr)
+ if !found {
+ return NewErr("no such field '%s'", field)
+ }
+ if fd.IsSet(o.value) {
+ return True
+ }
+ return False
+}
+
+// IsZeroValue returns true if the protobuf object is empty.
+func (o *protoObj) IsZeroValue() bool {
+ return proto.Equal(o.value, o.typeDesc.Zero())
+}
+
+func (o *protoObj) Get(index ref.Val) ref.Val {
+ protoFieldName, ok := index.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(index)
+ }
+ protoFieldStr := string(protoFieldName)
+ fd, found := o.typeDesc.FieldByName(protoFieldStr)
+ if !found {
+ return NewErr("no such field '%s'", index)
+ }
+ fv, err := fd.GetFrom(o.value)
+ if err != nil {
+ return NewErr(err.Error())
+ }
+ return o.NativeToValue(fv)
+}
+
+func (o *protoObj) Type() ref.Type {
+ return o.typeValue.(ref.Type)
+}
+
+func (o *protoObj) Value() any {
+ return o.value
+}
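
A hedged sketch of how a protoObj behaves once a message type is registered; the registry variable, message, and field name below are hypothetical placeholders:

    // obj is a ref.Val produced by a registry-backed Adapter for a registered
    // proto message `msg` (hypothetical):
    //   obj := reg.NativeToValue(msg)
    //   obj.(traits.Indexer).Get(String("name"))       // field access by name
    //   obj.(traits.FieldTester).IsSet(String("name")) // presence test
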
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/optional.go b/hack/tools/vendor/github.com/google/cel-go/common/types/optional.go
new file mode 100644
index 0000000000..97845a740c
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/optional.go
@@ -0,0 +1,108 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ // OptionalType indicates the runtime type of an optional value.
+ OptionalType = NewOpaqueType("optional_type")
+
+ // OptionalNone is a sentinel value which is used to indicate an empty optional value.
+ OptionalNone = &Optional{}
+)
+
+// OptionalOf returns an optional value which wraps a concrete CEL value.
+func OptionalOf(value ref.Val) *Optional {
+ return &Optional{value: value}
+}
+
+// Optional value which points to a value if non-empty.
+type Optional struct {
+ value ref.Val
+}
+
+// HasValue returns true if the optional has a value.
+func (o *Optional) HasValue() bool {
+ return o.value != nil
+}
+
+// GetValue returns the wrapped value contained in the optional.
+func (o *Optional) GetValue() ref.Val {
+ if !o.HasValue() {
+ return NewErr("optional.none() dereference")
+ }
+ return o.value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ if !o.HasValue() {
+ return nil, errors.New("optional.none() dereference")
+ }
+ return o.value.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *Optional) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case OptionalType:
+ return o
+ case TypeType:
+ return OptionalType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", OptionalType, typeVal)
+}
+
+// Equal determines whether the values contained by two optional values are equal.
+func (o *Optional) Equal(other ref.Val) ref.Val {
+ otherOpt, isOpt := other.(*Optional)
+ if !isOpt {
+ return False
+ }
+ if !o.HasValue() {
+ return Bool(!otherOpt.HasValue())
+ }
+ if !otherOpt.HasValue() {
+ return False
+ }
+ return o.value.Equal(otherOpt.value)
+}
+
+func (o *Optional) String() string {
+ if o.HasValue() {
+ return fmt.Sprintf("optional(%v)", o.GetValue())
+ }
+ return "optional.none()"
+}
+
+// Type implements the ref.Val interface method.
+func (o *Optional) Type() ref.Type {
+ return OptionalType
+}
+
+// Value returns the underlying 'Value()' of the wrapped value, if present.
+func (o *Optional) Value() any {
+ if o.value == nil {
+ return nil
+ }
+ return o.value.Value()
+}
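
A minimal sketch of the Optional API above (in-package usage assumed):

    opt := OptionalOf(String("value"))
    if opt.HasValue() {
        _ = opt.GetValue() // String("value")
    }
    _ = OptionalNone.Equal(opt)          // False
    _ = OptionalNone.Equal(OptionalNone) // True: two empty optionals are equal
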
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/overflow.go b/hack/tools/vendor/github.com/google/cel-go/common/types/overflow.go
new file mode 100644
index 0000000000..dcb66ef596
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/overflow.go
@@ -0,0 +1,429 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "math"
+ "time"
+)
+
+var (
+ doubleTwoTo64 = math.Ldexp(1.0, 64)
+)
+
+// addInt64Checked performs addition with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func addInt64Checked(x, y int64) (int64, error) {
+ if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) {
+ return 0, errIntOverflow
+ }
+ return x + y, nil
+}
+
+// subtractInt64Checked performs subtraction with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func subtractInt64Checked(x, y int64) (int64, error) {
+ if (y < 0 && x > math.MaxInt64+y) || (y > 0 && x < math.MinInt64+y) {
+ return 0, errIntOverflow
+ }
+ return x - y, nil
+}
+
+// negateInt64Checked performs negation with overflow detection of an int64.
+//
+// If the operation fails the error return value will be non-nil.
+func negateInt64Checked(x int64) (int64, error) {
+ // In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 {
+ return 0, errIntOverflow
+ }
+ return -x, nil
+}
+
+// multiplyInt64Checked performs multiplication with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func multiplyInt64Checked(x, y int64) (int64, error) {
+ // Detecting multiplication overflow is more complicated than the others. The first two detect
+ // attempting to negate MinInt64, which would result in MaxInt64+1. The other four detect normal
+ // overflow conditions.
+ if (x == -1 && y == math.MinInt64) || (y == -1 && x == math.MinInt64) ||
+ // x is positive, y is positive
+ (x > 0 && y > 0 && x > math.MaxInt64/y) ||
+ // x is positive, y is negative
+ (x > 0 && y < 0 && y < math.MinInt64/x) ||
+ // x is negative, y is positive
+ (x < 0 && y > 0 && x < math.MinInt64/y) ||
+ // x is negative, y is negative
+ (x < 0 && y < 0 && y < math.MaxInt64/x) {
+ return 0, errIntOverflow
+ }
+ return x * y, nil
+}
+
+// divideInt64Checked performs division with overflow detection of two int64 values,
+// as well as a division by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func divideInt64Checked(x, y int64) (int64, error) {
+ // Division by zero.
+ if y == 0 {
+ return 0, errDivideByZero
+ }
+ // In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 && y == -1 {
+ return 0, errIntOverflow
+ }
+ return x / y, nil
+}
+
+// moduloInt64Checked performs modulo with overflow detection of two int64 values
+// as well as a modulus by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloInt64Checked(x, y int64) (int64, error) {
+ // Modulus by zero.
+ if y == 0 {
+ return 0, errModulusByZero
+ }
+ // In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 && y == -1 {
+ return 0, errIntOverflow
+ }
+ return x % y, nil
+}
+
+// addUint64Checked performs addition with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addUint64Checked(x, y uint64) (uint64, error) {
+ if y > 0 && x > math.MaxUint64-y {
+ return 0, errUintOverflow
+ }
+ return x + y, nil
+}
+
+// subtractUint64Checked performs subtraction with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractUint64Checked(x, y uint64) (uint64, error) {
+ if y > x {
+ return 0, errUintOverflow
+ }
+ return x - y, nil
+}
+
+// multiplyUint64Checked performs multiplication with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func multiplyUint64Checked(x, y uint64) (uint64, error) {
+ if y != 0 && x > math.MaxUint64/y {
+ return 0, errUintOverflow
+ }
+ return x * y, nil
+}
+
+// divideUint64Checked performs division with a test for division by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func divideUint64Checked(x, y uint64) (uint64, error) {
+ if y == 0 {
+ return 0, errDivideByZero
+ }
+ return x / y, nil
+}
+
+// moduloUint64Checked performs modulo with a test for modulus by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloUint64Checked(x, y uint64) (uint64, error) {
+ if y == 0 {
+ return 0, errModulusByZero
+ }
+ return x % y, nil
+}
+
+// addDurationChecked performs addition with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addDurationChecked(x, y time.Duration) (time.Duration, error) {
+ val, err := addInt64Checked(int64(x), int64(y))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// subtractDurationChecked performs subtraction with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractDurationChecked(x, y time.Duration) (time.Duration, error) {
+ val, err := subtractInt64Checked(int64(x), int64(y))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// negateDurationChecked performs negation with overflow detection of a time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func negateDurationChecked(x time.Duration) (time.Duration, error) {
+ val, err := negateInt64Checked(int64(x))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// addTimeDurationChecked performs addition with overflow detection of a time.Time and time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+ // This is tricky. A time is represented as (int64, int32) where the first is seconds and the second
+ // is nanoseconds. A duration is int64 representing nanoseconds. We cannot normalize time to int64
+ // as it could potentially overflow. The only way to proceed is to break time and duration into
+ // second and nanosecond components.
+
+ // First we break time into its components by truncating and subtracting.
+ sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Second we break duration into its components by dividing and modulo.
+ sec2 := int64(y) / int64(time.Second) // Truncate to seconds.
+ nsec2 := int64(y) % int64(time.Second) // Get remainder.
+
+ // Add seconds first, detecting any overflow.
+ sec, err := addInt64Checked(sec1, sec2)
+ if err != nil {
+ return time.Time{}, err
+ }
+ // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+ nsec := nsec1 + nsec2
+
+ // We need to normalize nanoseconds to be positive and carry extra nanoseconds to seconds.
+ // Adapted from time.Unix(int64, int64).
+ if nsec < 0 || nsec >= int64(time.Second) {
+ // Add seconds.
+ sec, err = addInt64Checked(sec, nsec/int64(time.Second))
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ nsec -= (nsec / int64(time.Second)) * int64(time.Second)
+ if nsec < 0 {
+ // Subtract an extra second
+ sec, err = addInt64Checked(sec, -1)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nsec += int64(time.Second)
+ }
+ }
+
+ // Check if the number of seconds from the Unix epoch is within our acceptable range.
+ if sec < minUnixTime || sec > maxUnixTime {
+ return time.Time{}, errTimestampOverflow
+ }
+
+ // Return resulting time and propagate time zone.
+ return time.Unix(sec, nsec).In(x.Location()), nil
+}
+
+// subtractTimeChecked performs subtraction with overflow detection of two time.Time.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeChecked(x, y time.Time) (time.Duration, error) {
+ // Similar to addTimeDurationChecked() above.
+
+ // First we break time into its components by truncating and subtracting.
+ sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Second we break the other time into its components by truncating and subtracting.
+ sec2 := y.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec2 := y.Sub(y.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Subtract seconds first, detecting any overflow.
+ sec, err := subtractInt64Checked(sec1, sec2)
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+ nsec := nsec1 - nsec2
+
+ // Scale seconds to nanoseconds detecting overflow.
+ tsec, err := multiplyInt64Checked(sec, int64(time.Second))
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ // Lastly we add the scaled seconds and the nanosecond remainder together.
+ val, err := addInt64Checked(tsec, nsec)
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ return time.Duration(val), nil
+}
+
+// subtractTimeDurationChecked performs subtraction with overflow detection of a time.Time and
+// time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+ // The easiest way to implement this is to negate y and add them.
+ // x - y = x + -y
+ val, err := negateDurationChecked(y)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return addTimeDurationChecked(x, val)
+}
+
+// doubleToInt64Checked converts a double to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToInt64Checked(v float64) (int64, error) {
+ if math.IsInf(v, 0) || math.IsNaN(v) || v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
+ return 0, errIntOverflow
+ }
+ return int64(v), nil
+}
+
+// doubleToUint64Checked converts a double to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToUint64Checked(v float64) (uint64, error) {
+ if math.IsInf(v, 0) || math.IsNaN(v) || v < 0 || v >= doubleTwoTo64 {
+ return 0, errUintOverflow
+ }
+ return uint64(v), nil
+}
+
+// int64ToUint64Checked converts an int64 to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToUint64Checked(v int64) (uint64, error) {
+ if v < 0 {
+ return 0, errUintOverflow
+ }
+ return uint64(v), nil
+}
+
+// int64ToInt8Checked converts an int64 to an int8 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt8Checked(v int64) (int8, error) {
+ if v < math.MinInt8 || v > math.MaxInt8 {
+ return 0, errIntOverflow
+ }
+ return int8(v), nil
+}
+
+// int64ToInt16Checked converts an int64 to an int16 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt16Checked(v int64) (int16, error) {
+ if v < math.MinInt16 || v > math.MaxInt16 {
+ return 0, errIntOverflow
+ }
+ return int16(v), nil
+}
+
+// int64ToInt32Checked converts an int64 to an int32 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt32Checked(v int64) (int32, error) {
+ if v < math.MinInt32 || v > math.MaxInt32 {
+ return 0, errIntOverflow
+ }
+ return int32(v), nil
+}
+
+// uint64ToUint8Checked converts a uint64 to a uint8 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint8Checked(v uint64) (uint8, error) {
+ if v > math.MaxUint8 {
+ return 0, errUintOverflow
+ }
+ return uint8(v), nil
+}
+
+// uint64ToUint16Checked converts a uint64 to a uint16 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint16Checked(v uint64) (uint16, error) {
+ if v > math.MaxUint16 {
+ return 0, errUintOverflow
+ }
+ return uint16(v), nil
+}
+
+// uint64ToUint32Checked converts a uint64 to a uint32 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint32Checked(v uint64) (uint32, error) {
+ if v > math.MaxUint32 {
+ return 0, errUintOverflow
+ }
+ return uint32(v), nil
+}
+
+// uint64ToInt64Checked converts a uint64 to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToInt64Checked(v uint64) (int64, error) {
+ if v > math.MaxInt64 {
+ return 0, errIntOverflow
+ }
+ return int64(v), nil
+}
+
+func doubleToUint64Lossless(v float64) (uint64, bool) {
+ u, err := doubleToUint64Checked(v)
+ if err != nil {
+ return 0, false
+ }
+ if float64(u) != v {
+ return 0, false
+ }
+ return u, true
+}
+
+func doubleToInt64Lossless(v float64) (int64, bool) {
+ i, err := doubleToInt64Checked(v)
+ if err != nil {
+ return 0, false
+ }
+ if float64(i) != v {
+ return 0, false
+ }
+ return i, true
+}
+
+func int64ToUint64Lossless(v int64) (uint64, bool) {
+ u, err := int64ToUint64Checked(v)
+ return u, err == nil
+}
+
+func uint64ToInt64Lossless(v uint64) (int64, bool) {
+ i, err := uint64ToInt64Checked(v)
+ return i, err == nil
+}
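
A brief in-package sketch of the checked and lossless helpers above (they are unexported, so callers outside this package cannot use them directly):

    if _, err := addInt64Checked(math.MaxInt64, 1); err != nil {
        // errIntOverflow: the sum does not fit in an int64
    }
    _, ok := doubleToInt64Lossless(3.5)
    _ = ok // false: 3.5 cannot round-trip through int64
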
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
new file mode 100644
index 0000000000..e2b9d37b56
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
@@ -0,0 +1,53 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "checked.go",
+ "enum.go",
+ "equal.go",
+ "file.go",
+ "pb.go",
+ "type.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/pb",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protowire:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "equal_test.go",
+ "file_test.go",
+ "pb_test.go",
+ "type_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//test/proto2pb:test_all_types_go_proto",
+ "//test/proto3pb:test_all_types_go_proto",
+ "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/checked.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/checked.go
new file mode 100644
index 0000000000..312a6a072f
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/checked.go
@@ -0,0 +1,93 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ // CheckedPrimitives map from proto field descriptor type to expr.Type.
+ CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{
+ protoreflect.BoolKind: checkedBool,
+ protoreflect.BytesKind: checkedBytes,
+ protoreflect.DoubleKind: checkedDouble,
+ protoreflect.FloatKind: checkedDouble,
+ protoreflect.Int32Kind: checkedInt,
+ protoreflect.Int64Kind: checkedInt,
+ protoreflect.Sint32Kind: checkedInt,
+ protoreflect.Sint64Kind: checkedInt,
+ protoreflect.Uint32Kind: checkedUint,
+ protoreflect.Uint64Kind: checkedUint,
+ protoreflect.Fixed32Kind: checkedUint,
+ protoreflect.Fixed64Kind: checkedUint,
+ protoreflect.Sfixed32Kind: checkedInt,
+ protoreflect.Sfixed64Kind: checkedInt,
+ protoreflect.StringKind: checkedString}
+
+ // CheckedWellKnowns map from qualified proto type name to expr.Type for
+ // well-known proto types.
+ CheckedWellKnowns = map[string]*exprpb.Type{
+ // Wrapper types.
+ "google.protobuf.BoolValue": checkedWrap(checkedBool),
+ "google.protobuf.BytesValue": checkedWrap(checkedBytes),
+ "google.protobuf.DoubleValue": checkedWrap(checkedDouble),
+ "google.protobuf.FloatValue": checkedWrap(checkedDouble),
+ "google.protobuf.Int64Value": checkedWrap(checkedInt),
+ "google.protobuf.Int32Value": checkedWrap(checkedInt),
+ "google.protobuf.UInt64Value": checkedWrap(checkedUint),
+ "google.protobuf.UInt32Value": checkedWrap(checkedUint),
+ "google.protobuf.StringValue": checkedWrap(checkedString),
+ // Well-known types.
+ "google.protobuf.Any": checkedAny,
+ "google.protobuf.Duration": checkedDuration,
+ "google.protobuf.Timestamp": checkedTimestamp,
+ // Json types.
+ "google.protobuf.ListValue": checkedListDyn,
+ "google.protobuf.NullValue": checkedNull,
+ "google.protobuf.Struct": checkedMapStringDyn,
+ "google.protobuf.Value": checkedDyn,
+ }
+
+ // common types
+ checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}}
+ // Wrapper and primitive types.
+ checkedBool = checkedPrimitive(exprpb.Type_BOOL)
+ checkedBytes = checkedPrimitive(exprpb.Type_BYTES)
+ checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE)
+ checkedInt = checkedPrimitive(exprpb.Type_INT64)
+ checkedString = checkedPrimitive(exprpb.Type_STRING)
+ checkedUint = checkedPrimitive(exprpb.Type_UINT64)
+ // Well-known type equivalents.
+ checkedAny = checkedWellKnown(exprpb.Type_ANY)
+ checkedDuration = checkedWellKnown(exprpb.Type_DURATION)
+ checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP)
+ // Json-based type equivalents.
+ checkedNull = &exprpb.Type{
+ TypeKind: &exprpb.Type_Null{
+ Null: structpb.NullValue_NULL_VALUE}}
+ checkedListDyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}}
+ checkedMapStringDyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: checkedString,
+ ValueType: checkedDyn}}}
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/enum.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/enum.go
new file mode 100644
index 0000000000..09a1546308
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/enum.go
@@ -0,0 +1,44 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// newEnumValueDescription produces an enum value description with the fully qualified enum value
+// name and the enum value descriptor.
+func newEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
+ return &EnumValueDescription{
+ enumValueName: name,
+ desc: desc,
+ }
+}
+
+// EnumValueDescription maps a fully-qualified enum value name to its numeric value.
+type EnumValueDescription struct {
+ enumValueName string
+ desc protoreflect.EnumValueDescriptor
+}
+
+// Name returns the fully-qualified identifier name for the enum value.
+func (ed *EnumValueDescription) Name() string {
+ return ed.enumValueName
+}
+
+// Value returns the (numeric) value of the enum.
+func (ed *EnumValueDescription) Value() int32 {
+ return int32(ed.desc.Number())
+}
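
The description is deliberately thin: CEL models protobuf enums as plain int values, so only the fully-qualified name and the numeric value are ever consulted. A small sketch, assuming the default Db defined later in this vendor drop (pb.go) has indexed struct.proto:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
)

func main() {
	// NullValue is declared in struct.proto, which the default Db indexes,
	// so its values resolve by fully-qualified enum value name.
	if ed, found := pb.DefaultDb.DescribeEnum("google.protobuf.NullValue.NULL_VALUE"); found {
		fmt.Println(ed.Name(), ed.Value()) // google.protobuf.NullValue.NULL_VALUE 0
	}
}
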
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/equal.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/equal.go
new file mode 100644
index 0000000000..76893d85ea
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/equal.go
@@ -0,0 +1,206 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "bytes"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// Equal returns whether two proto.Message instances are equal using the following criteria:
+//
+// - Messages must share the same instance of the type descriptor
+// - Known set fields are compared using semantic equality
+// - Bytes are compared using bytes.Equal
+// - Scalar values are compared with operator ==
+// - List and map types are equal if they have the same length and all elements are equal
+// - Messages are equal if they share the same descriptor and all set fields are equal
+// - Unknown fields are compared using byte equality
+// - NaN values are not equal to each other
+// - google.protobuf.Any values are unpacked before comparison
+// - If the type descriptor for a protobuf.Any cannot be found, byte equality is used rather than
+// semantic equality.
+//
+// This method of proto equality mirrors the behavior of the C++ protobuf MessageDifferencer,
+// whereas the golang proto.Equal implementation mirrors the Java protobuf equals() method's
+// behavior, which treats NaN values as equal due to Java semantics.
+func Equal(x, y proto.Message) bool {
+ if x == nil || y == nil {
+ return x == nil && y == nil
+ }
+ xRef := x.ProtoReflect()
+ yRef := y.ProtoReflect()
+ return equalMessage(xRef, yRef)
+}
+
+func equalMessage(mx, my protoreflect.Message) bool {
+ // Note, the original proto.Equal upon which this implementation is based does not specifically handle the
+ // case when both messages are invalid. It is assumed that the descriptors will be equal and that byte-wise
+ // comparison will be used, though the semantics of validity are neither clear nor promised within the
+ // proto.Equal implementation.
+ if mx.IsValid() != my.IsValid() || mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ // This is an innovation on the default proto.Equal where protobuf.Any values are unpacked before comparison
+ // as otherwise the Any values are compared by bytes rather than structurally.
+ if isAny(mx) && isAny(my) {
+ ax := mx.Interface().(*anypb.Any)
+ ay := my.Interface().(*anypb.Any)
+ // If the values are not the same type url, return false.
+ if ax.GetTypeUrl() != ay.GetTypeUrl() {
+ return false
+ }
+ // If the values are byte equal, then return true.
+ if bytes.Equal(ax.GetValue(), ay.GetValue()) {
+ return true
+ }
+ // Otherwise fall through to the semantic comparison of the any values.
+ x, err := ax.UnmarshalNew()
+ if err != nil {
+ return false
+ }
+ y, err := ay.UnmarshalNew()
+ if err != nil {
+ return false
+ }
+ // Recursively compare the unwrapped messages to ensure nested Any values are unwrapped accordingly.
+ return equalMessage(x.ProtoReflect(), y.ProtoReflect())
+ }
+
+ // Walk the set fields to determine field-wise equality
+ nx := 0
+ equal := true
+ mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
+ nx++
+ equal = my.Has(fd) && equalField(fd, vx, my.Get(fd))
+ return equal
+ })
+ if !equal {
+ return false
+ }
+ // Establish the count of set fields on message y
+ ny := 0
+ my.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+ ny++
+ return true
+ })
+ // If the number of set fields is not equal return false.
+ if nx != ny {
+ return false
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
+ switch {
+ case fd.IsMap():
+ return equalMap(fd, x.Map(), y.Map())
+ case fd.IsList():
+ return equalList(fd, x.List(), y.List())
+ default:
+ return equalValue(fd, x, y)
+ }
+}
+
+func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ equal := true
+ x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ vy := y.Get(k)
+ equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
+ return equal
+ })
+ return equal
+}
+
+func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ for i := x.Len() - 1; i >= 0; i-- {
+ if !equalValue(fd, x.Get(i), y.Get(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return x.Bool() == y.Bool()
+ case protoreflect.EnumKind:
+ return x.Enum() == y.Enum()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind,
+ protoreflect.Int64Kind, protoreflect.Sint64Kind,
+ protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
+ return x.Int() == y.Int()
+ case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
+ protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
+ return x.Uint() == y.Uint()
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ return x.Float() == y.Float()
+ case protoreflect.StringKind:
+ return x.String() == y.String()
+ case protoreflect.BytesKind:
+ return bytes.Equal(x.Bytes(), y.Bytes())
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return equalMessage(x.Message(), y.Message())
+ default:
+ return x.Interface() == y.Interface()
+ }
+}
+
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ lenX := len(x)
+ lenY := len(y)
+ if lenX != lenY {
+ return false
+ }
+ if lenX == 0 {
+ return true
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ return reflect.DeepEqual(mx, my)
+}
+
+func isAny(m protoreflect.Message) bool {
+ return string(m.Descriptor().FullName()) == "google.protobuf.Any"
+}
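
The NaN rule is the quickest way to observe how this Equal diverges from the stock proto.Equal; a sketch, with wrapper messages chosen purely for illustration:

package main

import (
	"fmt"
	"math"

	"github.com/google/cel-go/common/types/pb"
	"google.golang.org/protobuf/proto"
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	x := wrapperspb.Double(math.NaN())
	y := wrapperspb.Double(math.NaN())
	// proto.Equal follows Java-style semantics and treats NaN as equal to NaN.
	fmt.Println(proto.Equal(x, y)) // true
	// pb.Equal follows C++ MessageDifferencer semantics: NaN != NaN.
	fmt.Println(pb.Equal(x, y)) // false
}
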
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/file.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/file.go
new file mode 100644
index 0000000000..e323afb1df
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/file.go
@@ -0,0 +1,202 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+)
+
+// newFileDescription returns a FileDescription instance with a complete listing of all the message
+// types and enum values, as well as a map of extensions declared within any scope in the file.
+func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDescription, extensionMap) {
+ metadata := collectFileMetadata(fileDesc)
+ enums := make(map[string]*EnumValueDescription)
+ for name, enumVal := range metadata.enumValues {
+ enums[name] = newEnumValueDescription(name, enumVal)
+ }
+ types := make(map[string]*TypeDescription)
+ for name, msgType := range metadata.msgTypes {
+ types[name] = newTypeDescription(name, msgType, pbdb.extensions)
+ }
+ fileExtMap := make(extensionMap)
+ for typeName, extensions := range metadata.msgExtensionMap {
+ messageExtMap, found := fileExtMap[typeName]
+ if !found {
+ messageExtMap = make(map[string]*FieldDescription)
+ }
+ for _, ext := range extensions {
+ extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor()
+ messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc)
+ }
+ fileExtMap[typeName] = messageExtMap
+ }
+ return &FileDescription{
+ name: fileDesc.Path(),
+ types: types,
+ enums: enums,
+ }, fileExtMap
+}
+
+// FileDescription holds a map of all types and enum values declared within a proto file.
+type FileDescription struct {
+ name string
+ types map[string]*TypeDescription
+ enums map[string]*EnumValueDescription
+}
+
+// Copy creates a copy of the FileDescription with updated Db references within its types.
+func (fd *FileDescription) Copy(pbdb *Db) *FileDescription {
+ typesCopy := make(map[string]*TypeDescription, len(fd.types))
+ for k, v := range fd.types {
+ typesCopy[k] = v.Copy(pbdb)
+ }
+ return &FileDescription{
+ name: fd.name,
+ types: typesCopy,
+ enums: fd.enums,
+ }
+}
+
+// GetName returns the fully qualified file path for the file.
+func (fd *FileDescription) GetName() string {
+ return fd.name
+}
+
+// GetEnumDescription returns an EnumValueDescription for a qualified enum value
+// name declared within the .proto file.
+func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) {
+ ed, found := fd.enums[sanitizeProtoName(enumName)]
+ return ed, found
+}
+
+// GetEnumNames returns the string names of all enum values in the file.
+func (fd *FileDescription) GetEnumNames() []string {
+ enumNames := make([]string, len(fd.enums))
+ i := 0
+ for _, e := range fd.enums {
+ enumNames[i] = e.Name()
+ i++
+ }
+ return enumNames
+}
+
+// GetTypeDescription returns a TypeDescription for a qualified protobuf message type name
+// declared within the .proto file.
+func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, bool) {
+ td, found := fd.types[sanitizeProtoName(typeName)]
+ return td, found
+}
+
+// GetTypeNames returns the list of all type names contained within the file.
+func (fd *FileDescription) GetTypeNames() []string {
+ typeNames := make([]string, len(fd.types))
+ i := 0
+ for _, t := range fd.types {
+ typeNames[i] = t.Name()
+ i++
+ }
+ return typeNames
+}
+
+// sanitizeProtoName strips the leading '.' from the proto message name.
+func sanitizeProtoName(name string) string {
+ if name != "" && name[0] == '.' {
+ return name[1:]
+ }
+ return name
+}
+
+// fileMetadata is a flattened view of message types and enum values within a file descriptor.
+type fileMetadata struct {
+ // msgTypes maps from fully-qualified message name to descriptor.
+ msgTypes map[string]protoreflect.MessageDescriptor
+ // enumValues maps from fully-qualified enum value to enum value descriptor.
+ enumValues map[string]protoreflect.EnumValueDescriptor
+ // msgExtensionMap maps from the protobuf message name being extended to a set of extensions
+ // for the type.
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor
+
+ // TODO: support enum type definitions for use in future type-check enhancements.
+}
+
+// collectFileMetadata traverses the proto file object graph to collect message types and enum
+// values and index them by their fully qualified names.
+func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata {
+ msgTypes := make(map[string]protoreflect.MessageDescriptor)
+ enumValues := make(map[string]protoreflect.EnumValueDescriptor)
+ msgExtensionMap := make(map[string][]protoreflect.ExtensionDescriptor)
+ collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues, msgExtensionMap)
+ collectEnumValues(fileDesc.Enums(), enumValues)
+ collectExtensions(fileDesc.Extensions(), msgExtensionMap)
+ return &fileMetadata{
+ msgTypes: msgTypes,
+ enumValues: enumValues,
+ msgExtensionMap: msgExtensionMap,
+ }
+}
+
+// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of
+// fully qualified protobuf names to descriptors.
+func collectMsgTypes(msgTypes protoreflect.MessageDescriptors,
+ msgTypeMap map[string]protoreflect.MessageDescriptor,
+ enumValueMap map[string]protoreflect.EnumValueDescriptor,
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < msgTypes.Len(); i++ {
+ msgType := msgTypes.Get(i)
+ msgTypeMap[string(msgType.FullName())] = msgType
+ nestedMsgTypes := msgType.Messages()
+ if nestedMsgTypes.Len() != 0 {
+ collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap, msgExtensionMap)
+ }
+ nestedEnumTypes := msgType.Enums()
+ if nestedEnumTypes.Len() != 0 {
+ collectEnumValues(nestedEnumTypes, enumValueMap)
+ }
+ nestedExtensions := msgType.Extensions()
+ if nestedExtensions.Len() != 0 {
+ collectExtensions(nestedExtensions, msgExtensionMap)
+ }
+ }
+}
+
+// collectEnumValues accumulates the enum values within an enum declaration.
+func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
+ for i := 0; i < enumTypes.Len(); i++ {
+ enumType := enumTypes.Get(i)
+ enumTypeValues := enumType.Values()
+ for j := 0; j < enumTypeValues.Len(); j++ {
+ enumValue := enumTypeValues.Get(j)
+ enumValueName := fmt.Sprintf("%s.%s", string(enumType.FullName()), string(enumValue.Name()))
+ enumValueMap[enumValueName] = enumValue
+ }
+ }
+}
+
+func collectExtensions(extensions protoreflect.ExtensionDescriptors, msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < extensions.Len(); i++ {
+ ext := extensions.Get(i)
+ extendsMsg := string(ext.ContainingMessage().FullName())
+ msgExts, found := msgExtensionMap[extendsMsg]
+ if !found {
+ msgExts = []protoreflect.ExtensionDescriptor{}
+ }
+ msgExts = append(msgExts, ext)
+ msgExtensionMap[extendsMsg] = msgExts
+ }
+}
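
collectFileMetadata flattens the whole file into name-indexed maps, which is what keeps the Get*Names accessors cheap. A sketch of what that indexing yields for struct.proto, going through the Db registration API added in pb.go below:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
	structpb "google.golang.org/protobuf/types/known/structpb"
)

func main() {
	pbdb := pb.NewDb()
	fd, err := pbdb.RegisterMessage(&structpb.Value{})
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.GetName()) // google/protobuf/struct.proto
	// Message names are fully qualified, e.g. google.protobuf.Struct, and include
	// nested map-entry messages such as google.protobuf.Struct.FieldsEntry.
	fmt.Println(fd.GetTypeNames())
	// Enum values are indexed as <enum name>.<value name>,
	// e.g. google.protobuf.NullValue.NULL_VALUE.
	fmt.Println(fd.GetEnumNames())
}
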
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/pb.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/pb.go
new file mode 100644
index 0000000000..eadebcb04e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -0,0 +1,258 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pb reflects over protocol buffer descriptors to generate objects
+// that simplify type, enum, and field lookup.
+package pb
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durpb "google.golang.org/protobuf/types/known/durationpb"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tspb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Db maps from file / message / enum name to file description.
+//
+// Each Db is isolated from the others, and while information about protobuf descriptors may be
+// fetched from the global protobuf registry, no descriptors are added to that registry, as doing
+// so would violate the isolation guarantees of the Db object.
+type Db struct {
+ revFileDescriptorMap map[string]*FileDescription
+ // files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
+ files []*FileDescription
+ // extensions contains the mapping between a given type name, extension name and its FieldDescription
+ extensions map[string]map[string]*FieldDescription
+}
+
+// extensionMap is a type alias for a map[typeName]map[extensionName]*FieldDescription
+type extensionMap = map[string]map[string]*FieldDescription
+
+var (
+ // DefaultDb used at evaluation time or unless overridden at check time.
+ DefaultDb = &Db{
+ revFileDescriptorMap: make(map[string]*FileDescription),
+ files: []*FileDescription{},
+ extensions: make(extensionMap),
+ }
+)
+
+// Merge will copy the source proto message into the destination, or error if the merge cannot be completed.
+//
+// Unlike proto.Merge, this method will fall back to proto.Marshal/Unmarshal if the two proto messages do not
+// share the same instance of their type descriptor.
+func Merge(dstPB, srcPB proto.Message) error {
+ src, dst := srcPB.ProtoReflect(), dstPB.ProtoReflect()
+ if src.Descriptor() == dst.Descriptor() {
+ proto.Merge(dstPB, srcPB)
+ return nil
+ }
+ if src.Descriptor().FullName() != dst.Descriptor().FullName() {
+ return fmt.Errorf("pb.Merge() arguments must be the same type. got: %v, %v",
+ dst.Descriptor().FullName(), src.Descriptor().FullName())
+ }
+ bytes, err := proto.Marshal(srcPB)
+ if err != nil {
+ return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to marshal source proto: %v", err)
+ }
+ err = proto.Unmarshal(bytes, dstPB)
+ if err != nil {
+ return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to unmarshal to dest proto: %v", err)
+ }
+ return nil
+}
+
+// NewDb creates a new `pb.Db` with an empty type name to file description map.
+func NewDb() *Db {
+ pbdb := &Db{
+ revFileDescriptorMap: make(map[string]*FileDescription),
+ files: []*FileDescription{},
+ extensions: make(extensionMap),
+ }
+ // The FileDescription objects in the default db contain lazily initialized TypeDescription
+ // values which may point to the state contained in the DefaultDb irrespective of this shallow
+ // copy; however, the type graph for a field is idempotently computed, and is guaranteed to
+ // only be initialized once thanks to atomic values within the TypeDescription objects, so it
+ // is safe to share these values across instances.
+ for k, v := range DefaultDb.revFileDescriptorMap {
+ pbdb.revFileDescriptorMap[k] = v
+ }
+ pbdb.files = append(pbdb.files, DefaultDb.files...)
+ return pbdb
+}
+
+// Copy creates a copy of the current database with its own internal descriptor mapping.
+func (pbdb *Db) Copy() *Db {
+ copy := NewDb()
+ for _, fd := range pbdb.files {
+ hasFile := false
+ for _, fd2 := range copy.files {
+ if fd2 == fd {
+ hasFile = true
+ }
+ }
+ if !hasFile {
+ fd = fd.Copy(copy)
+ copy.files = append(copy.files, fd)
+ }
+ for _, enumValName := range fd.GetEnumNames() {
+ copy.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ copy.revFileDescriptorMap[msgTypeName] = fd
+ }
+ copy.revFileDescriptorMap[fd.GetName()] = fd
+ }
+ for typeName, extFieldMap := range pbdb.extensions {
+ copyExtFieldMap, found := copy.extensions[typeName]
+ if !found {
+ copyExtFieldMap = make(map[string]*FieldDescription, len(extFieldMap))
+ }
+ for extFieldName, fd := range extFieldMap {
+ copyExtFieldMap[extFieldName] = fd
+ }
+ copy.extensions[typeName] = copyExtFieldMap
+ }
+ return copy
+}
+
+// FileDescriptions returns the set of file descriptions associated with this db.
+func (pbdb *Db) FileDescriptions() []*FileDescription {
+ return pbdb.files
+}
+
+// RegisterDescriptor produces a `FileDescription` from a `FileDescriptor` and registers the
+// message and enum types into the `pb.Db`.
+func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileDescription, error) {
+ fd, found := pbdb.revFileDescriptorMap[fileDesc.Path()]
+ if found {
+ return fd, nil
+ }
+ // Make sure to search the global registry to see if a protoreflect.FileDescriptor for
+ // the file specified has been linked into the binary. If so, use the copy of the descriptor
+ // from the global cache.
+ //
+ // Note: Proto reflection relies on descriptor values being object equal rather than object
+ // equivalence. This choice means that a FieldDescriptor generated from a FileDescriptorProto
+ // will be incompatible with the FieldDescriptor in the global registry and any message created
+ // from that global registry.
+ globalFD, err := protoregistry.GlobalFiles.FindFileByPath(fileDesc.Path())
+ if err == nil {
+ fileDesc = globalFD
+ }
+ var fileExtMap extensionMap
+ fd, fileExtMap = newFileDescription(fileDesc, pbdb)
+ for _, enumValName := range fd.GetEnumNames() {
+ pbdb.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ pbdb.revFileDescriptorMap[msgTypeName] = fd
+ }
+ pbdb.revFileDescriptorMap[fd.GetName()] = fd
+
+ // Return the specific file descriptor registered.
+ pbdb.files = append(pbdb.files, fd)
+
+ // Index the protobuf message extensions from the file into the pbdb
+ for typeName, extMap := range fileExtMap {
+ typeExtMap, found := pbdb.extensions[typeName]
+ if !found {
+ pbdb.extensions[typeName] = extMap
+ continue
+ }
+ for extName, field := range extMap {
+ typeExtMap[extName] = field
+ }
+ }
+ return fd, nil
+}
+
+// RegisterMessage produces a `FileDescription` from a `message` and registers the message and all
+// other definitions within the message file into the `pb.Db`.
+func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) {
+ msgDesc := message.ProtoReflect().Descriptor()
+ msgName := msgDesc.FullName()
+ typeName := sanitizeProtoName(string(msgName))
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd, nil
+ }
+ return pbdb.RegisterDescriptor(msgDesc.ParentFile())
+}
+
+// DescribeEnum takes a qualified enum name and returns an `EnumValueDescription` if it exists in the
+// `pb.Db`.
+func (pbdb *Db) DescribeEnum(enumName string) (*EnumValueDescription, bool) {
+ enumName = sanitizeProtoName(enumName)
+ if fd, found := pbdb.revFileDescriptorMap[enumName]; found {
+ return fd.GetEnumDescription(enumName)
+ }
+ return nil, false
+}
+
+// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`.
+func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, bool) {
+ typeName = sanitizeProtoName(typeName)
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd.GetTypeDescription(typeName)
+ }
+ return nil, false
+}
+
+// CollectFileDescriptorSet builds a file descriptor set associated with the file where the input
+// message is declared.
+func CollectFileDescriptorSet(message proto.Message) map[string]protoreflect.FileDescriptor {
+ fdMap := map[string]protoreflect.FileDescriptor{}
+ parentFile := message.ProtoReflect().Descriptor().ParentFile()
+ fdMap[parentFile.Path()] = parentFile
+ // Initialize list of dependencies
+ deps := make([]protoreflect.FileImport, parentFile.Imports().Len())
+ for i := 0; i < parentFile.Imports().Len(); i++ {
+ deps[i] = parentFile.Imports().Get(i)
+ }
+ // Expand list for new dependencies
+ for i := 0; i < len(deps); i++ {
+ dep := deps[i]
+ if _, found := fdMap[dep.Path()]; found {
+ continue
+ }
+ fdMap[dep.Path()] = dep.FileDescriptor
+ for j := 0; j < dep.FileDescriptor.Imports().Len(); j++ {
+ deps = append(deps, dep.FileDescriptor.Imports().Get(j))
+ }
+ }
+ return fdMap
+}
+
+func init() {
+ // Describe well-known types to ensure they can always be resolved by the check and interpret
+ // execution phases.
+ //
+ // The following subset of message types is enough to ensure that all well-known types can be
+ // resolved in the runtime, since describing the value results in describing the whole file
+ // where the message is declared.
+ DefaultDb.RegisterMessage(&anypb.Any{})
+ DefaultDb.RegisterMessage(&durpb.Duration{})
+ DefaultDb.RegisterMessage(&emptypb.Empty{})
+ DefaultDb.RegisterMessage(&tspb.Timestamp{})
+ DefaultDb.RegisterMessage(&structpb.Value{})
+ DefaultDb.RegisterMessage(&wrapperspb.BoolValue{})
+}
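
Taken together, a Db behaves as a private descriptor registry pre-seeded by the init() above with the well-known types. A sketch of typical registration and lookup, with durationpb standing in for an application message:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
	durpb "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	pbdb := pb.NewDb()
	// Duration is already present via init(); re-registration is a cheap
	// no-op that returns the existing FileDescription.
	if _, err := pbdb.RegisterMessage(&durpb.Duration{}); err != nil {
		panic(err)
	}
	if td, found := pbdb.DescribeType("google.protobuf.Duration"); found {
		fmt.Println(td.Name()) // google.protobuf.Duration
	}
	// Copies share immutable descriptor state but index it independently.
	isolated := pbdb.Copy()
	fmt.Println(len(isolated.FileDescriptions()) == len(pbdb.FileDescriptions())) // true
}
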
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/type.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/type.go
new file mode 100644
index 0000000000..bdd474c95a
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -0,0 +1,614 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// description is a private interface used to make it convenient to perform type unwrapping at
+// the TypeDescription or FieldDescription level.
+type description interface {
+ // Zero returns an empty immutable protobuf message when the description is a protobuf message
+ // type.
+ Zero() proto.Message
+}
+
+// newTypeDescription produces a TypeDescription value for the fully-qualified proto type name
+// with a given descriptor.
+func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription {
+ msgType := dynamicpb.NewMessageType(desc)
+ msgZero := dynamicpb.NewMessage(desc)
+ fieldMap := map[string]*FieldDescription{}
+ fields := desc.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ f := fields.Get(i)
+ fieldMap[string(f.Name())] = newFieldDescription(f)
+ }
+ return &TypeDescription{
+ typeName: typeName,
+ desc: desc,
+ msgType: msgType,
+ fieldMap: fieldMap,
+ extensions: extensions,
+ reflectType: reflectTypeOf(msgZero),
+ zeroMsg: zeroValueOf(msgZero),
+ }
+}
+
+// TypeDescription is a collection of type metadata relevant to expression
+// checking and evaluation.
+type TypeDescription struct {
+ typeName string
+ desc protoreflect.MessageDescriptor
+ msgType protoreflect.MessageType
+ fieldMap map[string]*FieldDescription
+ extensions extensionMap
+ reflectType reflect.Type
+ zeroMsg proto.Message
+}
+
+// Copy copies the type description with updated references to the Db.
+func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription {
+ return &TypeDescription{
+ typeName: td.typeName,
+ desc: td.desc,
+ msgType: td.msgType,
+ fieldMap: td.fieldMap,
+ extensions: pbdb.extensions,
+ reflectType: td.reflectType,
+ zeroMsg: td.zeroMsg,
+ }
+}
+
+// FieldMap returns a string field name to FieldDescription map.
+func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
+ return td.fieldMap
+}
+
+// FieldByName returns (FieldDescription, true) if the field name is declared within the type.
+func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) {
+ fd, found := td.fieldMap[name]
+ if found {
+ return fd, true
+ }
+ extFieldMap, found := td.extensions[td.typeName]
+ if !found {
+ return nil, false
+ }
+ fd, found = extFieldMap[name]
+ return fd, found
+}
+
+// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible.
+//
+// This method returns the unwrapped value and 'true' on success, else the original value and 'false'.
+func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (any, bool, error) {
+ return unwrap(td, msg)
+}
+
+// Name returns the fully-qualified name of the type.
+func (td *TypeDescription) Name() string {
+ return string(td.desc.FullName())
+}
+
+// New returns a mutable proto message.
+func (td *TypeDescription) New() protoreflect.Message {
+ return td.msgType.New()
+}
+
+// ReflectType returns the Golang reflect.Type for this type.
+func (td *TypeDescription) ReflectType() reflect.Type {
+ return td.reflectType
+}
+
+// Zero returns the zero proto.Message value for this type.
+func (td *TypeDescription) Zero() proto.Message {
+ return td.zeroMsg
+}
+
+// newFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
+func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
+ var reflectType reflect.Type
+ var zeroMsg proto.Message
+ switch fieldDesc.Kind() {
+ case protoreflect.EnumKind:
+ reflectType = reflectTypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ zeroMsg = dynamicpb.NewMessage(fieldDesc.Message())
+ reflectType = reflectTypeOf(zeroMsg)
+ default:
+ reflectType = reflectTypeOf(fieldDesc.Default().Interface())
+ if fieldDesc.IsList() {
+ var elemValue protoreflect.Value
+ if fieldDesc.IsExtension() {
+ et := dynamicpb.NewExtensionType(fieldDesc)
+ elemValue = et.New().List().NewElement()
+ } else {
+ parentMsgType := fieldDesc.ContainingMessage()
+ parentMsg := dynamicpb.NewMessage(parentMsgType)
+ listField := parentMsg.NewField(fieldDesc).List()
+ elemValue = listField.NewElement()
+ }
+ elem := elemValue.Interface()
+ switch elemType := elem.(type) {
+ case protoreflect.Message:
+ elem = elemType.Interface()
+ }
+ reflectType = reflectTypeOf(elem)
+ }
+ }
+ // Ensure the list type is appropriately reflected as a Go-native list.
+ if fieldDesc.IsList() {
+ reflectType = reflect.SliceOf(reflectType)
+ }
+ var keyType, valType *FieldDescription
+ if fieldDesc.IsMap() {
+ keyType = newFieldDescription(fieldDesc.MapKey())
+ valType = newFieldDescription(fieldDesc.MapValue())
+ }
+ return &FieldDescription{
+ desc: fieldDesc,
+ KeyType: keyType,
+ ValueType: valType,
+ reflectType: reflectType,
+ zeroMsg: zeroValueOf(zeroMsg),
+ }
+}
+
+// FieldDescription holds metadata related to fields declared within a type.
+type FieldDescription struct {
+ // KeyType holds the key FieldDescription for map fields.
+ KeyType *FieldDescription
+ // ValueType holds the value FieldDescription for map fields.
+ ValueType *FieldDescription
+
+ desc protoreflect.FieldDescriptor
+ reflectType reflect.Type
+ zeroMsg proto.Message
+}
+
+// CheckedType returns the type-definition used at type-check time.
+func (fd *FieldDescription) CheckedType() *exprpb.Type {
+ if fd.desc.IsMap() {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: fd.KeyType.typeDefToType(),
+ ValueType: fd.ValueType.typeDefToType(),
+ },
+ },
+ }
+ }
+ if fd.desc.IsList() {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{
+ ElemType: fd.typeDefToType()}}}
+ }
+ return fd.typeDefToType()
+}
+
+// Descriptor returns the protoreflect.FieldDescriptor for this type.
+func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor {
+ return fd.desc
+}
+
+// IsSet returns whether the field is set on the target value, per the proto presence conventions
+// of proto2 or proto3 accordingly.
+//
+// This function implements the FieldType.IsSet function contract which can be used to operate on
+// more than just protobuf field accesses; however, the target here must be a protobuf.Message.
+func (fd *FieldDescription) IsSet(target any) bool {
+ switch v := target.(type) {
+ case proto.Message:
+ pbRef := v.ProtoReflect()
+ pbDesc := pbRef.Descriptor()
+ if pbDesc == fd.desc.ContainingMessage() {
+ // When the target protobuf shares the same message descriptor instance as the field
+ // descriptor, use the cached field descriptor value.
+ return pbRef.Has(fd.desc)
+ }
+ // Otherwise, fall back to a dynamic lookup of the field descriptor from the target
+ // instance, as an attempt to use the cached field descriptor would result in a panic.
+ return pbRef.Has(pbDesc.Fields().ByName(protoreflect.Name(fd.Name())))
+ default:
+ return false
+ }
+}
+
+// GetFrom returns the value of the field on the proto-generated struct.
+//
+// If the field is not set, the proto default value is returned instead.
+//
+// This function implements the FieldType.GetFrom function contract which can be used to operate
+// on more than just protobuf field accesses; however, the target here must be a protobuf.Message.
+func (fd *FieldDescription) GetFrom(target any) (any, error) {
+ v, ok := target.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target)
+ }
+ pbRef := v.ProtoReflect()
+ pbDesc := pbRef.Descriptor()
+ var fieldVal any
+ if pbDesc == fd.desc.ContainingMessage() {
+ // When the target protobuf shares the same message descriptor instance as the field
+ // descriptor, use the cached field descriptor value.
+ fieldVal = pbRef.Get(fd.desc).Interface()
+ } else {
+ // Otherwise, fall back to a dynamic lookup of the field descriptor from the target
+ // instance, as an attempt to use the cached field descriptor would result in a panic.
+ fieldVal = pbRef.Get(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))).Interface()
+ }
+ switch fv := fieldVal.(type) {
+ // Fast-path return for primitive types.
+ case bool, []byte, float32, float64, int32, int64, string, uint32, uint64, protoreflect.List:
+ return fv, nil
+ case protoreflect.EnumNumber:
+ return int64(fv), nil
+ case protoreflect.Map:
+ // Return a wrapper around the protobuf-reflected Map types which carries additional
+ // information about the key and value definitions of the map.
+ return &Map{Map: fv, KeyType: fd.KeyType, ValueType: fd.ValueType}, nil
+ case protoreflect.Message:
+ // Make sure to unwrap well-known protobuf types before returning.
+ unwrapped, _, err := fd.MaybeUnwrapDynamic(fv)
+ return unwrapped, err
+ default:
+ return fv, nil
+ }
+}
+
+// IsEnum returns true if the field type refers to an enum value.
+func (fd *FieldDescription) IsEnum() bool {
+ return fd.ProtoKind() == protoreflect.EnumKind
+}
+
+// IsMap returns true if the field is of map type.
+func (fd *FieldDescription) IsMap() bool {
+ return fd.desc.IsMap()
+}
+
+// IsMessage returns true if the field is of message type.
+func (fd *FieldDescription) IsMessage() bool {
+ kind := fd.ProtoKind()
+ return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind
+}
+
+// IsOneof returns true if the field is declared within a oneof block.
+func (fd *FieldDescription) IsOneof() bool {
+ return fd.desc.ContainingOneof() != nil
+}
+
+// IsList returns true if the field is a repeated value.
+//
+// This method will also return true for map values, so check whether the
+// field is also a map.
+func (fd *FieldDescription) IsList() bool {
+ return fd.desc.IsList()
+}
+
+// MaybeUnwrapDynamic takes the reflected protoreflect.Message and determines whether the
+// value can be unwrapped to a more primitive CEL type.
+//
+// This function returns the unwrapped value and 'true' on success, or the original value
+// and 'false' otherwise.
+func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, bool, error) {
+ return unwrapDynamic(fd, msg)
+}
+
+// Name returns the name of the field as declared within the proto message.
+func (fd *FieldDescription) Name() string {
+ return string(fd.desc.Name())
+}
+
+// ProtoKind returns the protobuf reflected kind of the field.
+func (fd *FieldDescription) ProtoKind() protoreflect.Kind {
+ return fd.desc.Kind()
+}
+
+// ReflectType returns the Golang reflect.Type for this field.
+func (fd *FieldDescription) ReflectType() reflect.Type {
+ return fd.reflectType
+}
+
+// String returns the fully qualified name of the field within its type as well as whether the
+// field occurs within a oneof.
+func (fd *FieldDescription) String() string {
+ return fmt.Sprintf("%v.%s `oneof=%t`", fd.desc.ContainingMessage().FullName(), fd.Name(), fd.IsOneof())
+}
+
+// Zero returns the zero value for the protobuf message represented by this field.
+//
+// If the field is not a proto.Message type, the zero value is nil.
+func (fd *FieldDescription) Zero() proto.Message {
+ return fd.zeroMsg
+}
+
+func (fd *FieldDescription) typeDefToType() *exprpb.Type {
+ if fd.IsMessage() {
+ msgType := string(fd.desc.Message().FullName())
+ if wk, found := CheckedWellKnowns[msgType]; found {
+ return wk
+ }
+ return checkedMessageType(msgType)
+ }
+ if fd.IsEnum() {
+ return checkedInt
+ }
+ return CheckedPrimitives[fd.ProtoKind()]
+}
+
+// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in
+// retrieving individual elements within CEL value data types.
+type Map struct {
+ protoreflect.Map
+ KeyType *FieldDescription
+ ValueType *FieldDescription
+}
+
+func checkedMessageType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{MessageType: name}}
+}
+
+func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{Primitive: primitive}}
+}
+
+func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}}
+}
+
+func checkedWrap(t *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}}
+}
+
+// unwrap unwraps the provided proto.Message value, potentially based on the description if the
+// input message is a *dynamicpb.Message which obscures the typing information from Go.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrap(desc description, msg proto.Message) (any, bool, error) {
+ switch v := msg.(type) {
+ case *anypb.Any:
+ dynMsg, err := v.UnmarshalNew()
+ if err != nil {
+ return v, false, err
+ }
+ return unwrapDynamic(desc, dynMsg.ProtoReflect())
+ case *dynamicpb.Message:
+ return unwrapDynamic(desc, v)
+ case *dpb.Duration:
+ return v.AsDuration(), true, nil
+ case *tpb.Timestamp:
+ return v.AsTime(), true, nil
+ case *structpb.Value:
+ switch v.GetKind().(type) {
+ case *structpb.Value_BoolValue:
+ return v.GetBoolValue(), true, nil
+ case *structpb.Value_ListValue:
+ return v.GetListValue(), true, nil
+ case *structpb.Value_NullValue:
+ return structpb.NullValue_NULL_VALUE, true, nil
+ case *structpb.Value_NumberValue:
+ return v.GetNumberValue(), true, nil
+ case *structpb.Value_StringValue:
+ return v.GetStringValue(), true, nil
+ case *structpb.Value_StructValue:
+ return v.GetStructValue(), true, nil
+ default:
+ return structpb.NullValue_NULL_VALUE, true, nil
+ }
+ case *wrapperspb.BoolValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.BytesValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.DoubleValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.FloatValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return float64(v.GetValue()), true, nil
+ case *wrapperspb.Int32Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return int64(v.GetValue()), true, nil
+ case *wrapperspb.Int64Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.StringValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.UInt32Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return uint64(v.GetValue()), true, nil
+ case *wrapperspb.UInt64Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ }
+ return msg, false, nil
+}
+
+// unwrapDynamic unwraps a reflected protobuf Message value.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, error) {
+ msg := refMsg.Interface()
+ if !refMsg.IsValid() {
+ msg = desc.Zero()
+ }
+ // In order to ensure that these wrapped types match the expectations of the CEL type system,
+ // the dynamicpb.Message must be merged with a protobuf instance of the well-known type value.
+ typeName := string(refMsg.Descriptor().FullName())
+ switch typeName {
+ case "google.protobuf.Any":
+ // Note, Any values require further unwrapping; however, this unwrapping may or may not
+ // be to a well-known type. If the unwrapped value is a well-known type it will be further
+ // unwrapped before being returned to the caller. Otherwise, the dynamic protobuf object
+ // represented by the Any will be returned.
+ unwrappedAny := &anypb.Any{}
+ err := Merge(unwrappedAny, msg)
+ if err != nil {
+ return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err)
+ }
+ dynMsg, err := unwrappedAny.UnmarshalNew()
+ if err != nil {
+ // Allow the error to move further up the stack as it should result in a type
+ // conversion error if the caller does not recover it somehow.
+ return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err)
+ }
+ // Attempt to unwrap the dynamic type, otherwise return the dynamic message.
+ unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect())
+ if err == nil && nested {
+ return unwrapped, true, nil
+ }
+ return dynMsg, true, err
+ case "google.protobuf.BoolValue",
+ "google.protobuf.BytesValue",
+ "google.protobuf.DoubleValue",
+ "google.protobuf.FloatValue",
+ "google.protobuf.Int32Value",
+ "google.protobuf.Int64Value",
+ "google.protobuf.StringValue",
+ "google.protobuf.UInt32Value",
+ "google.protobuf.UInt64Value":
+ // The msg value is ignored when dealing with wrapper types as they have a null or value
+ // behavior, rather than the standard zero value behavior of other proto message types.
+ if !refMsg.IsValid() {
+ return structpb.NullValue_NULL_VALUE, true, nil
+ }
+ valueField := refMsg.Descriptor().Fields().ByName("value")
+ return refMsg.Get(valueField).Interface(), true, nil
+ case "google.protobuf.Duration":
+ unwrapped := &dpb.Duration{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped.AsDuration(), true, nil
+ case "google.protobuf.ListValue":
+ unwrapped := &structpb.ListValue{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped, true, nil
+ case "google.protobuf.NullValue":
+ return structpb.NullValue_NULL_VALUE, true, nil
+ case "google.protobuf.Struct":
+ unwrapped := &structpb.Struct{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped, true, nil
+ case "google.protobuf.Timestamp":
+ unwrapped := &tpb.Timestamp{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped.AsTime(), true, nil
+ case "google.protobuf.Value":
+ unwrapped := &structpb.Value{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrap(desc, unwrapped)
+ }
+ return msg, false, nil
+}
+
+// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve
+// well-known protobuf reflected types expected by the CEL type system.
+func reflectTypeOf(val any) reflect.Type {
+ switch v := val.(type) {
+ case proto.Message:
+ return reflect.TypeOf(zeroValueOf(v))
+ default:
+ return reflect.TypeOf(v)
+ }
+}
+
+// zeroValueOf will return the strongest possible proto.Message representing the default protobuf
+// message value of the input msg type.
+func zeroValueOf(msg proto.Message) proto.Message {
+ if msg == nil {
+ return nil
+ }
+ typeName := string(msg.ProtoReflect().Descriptor().FullName())
+ zeroVal, found := zeroValueMap[typeName]
+ if found {
+ return zeroVal
+ }
+ return msg
+}
+
+var (
+ jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value"
+
+ zeroValueMap = map[string]proto.Message{
+ "google.protobuf.Any": &anypb.Any{TypeUrl: jsonValueTypeURL},
+ "google.protobuf.Duration": &dpb.Duration{},
+ "google.protobuf.ListValue": &structpb.ListValue{},
+ "google.protobuf.Struct": &structpb.Struct{},
+ "google.protobuf.Timestamp": &tpb.Timestamp{},
+ "google.protobuf.Value": &structpb.Value{},
+ "google.protobuf.BoolValue": wrapperspb.Bool(false),
+ "google.protobuf.BytesValue": wrapperspb.Bytes([]byte{}),
+ "google.protobuf.DoubleValue": wrapperspb.Double(0.0),
+ "google.protobuf.FloatValue": wrapperspb.Float(0.0),
+ "google.protobuf.Int32Value": wrapperspb.Int32(0),
+ "google.protobuf.Int64Value": wrapperspb.Int64(0),
+ "google.protobuf.StringValue": wrapperspb.String(""),
+ "google.protobuf.UInt32Value": wrapperspb.UInt32(0),
+ "google.protobuf.UInt64Value": wrapperspb.UInt64(0),
+ }
+)
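
The net effect of the unwrap machinery above is that well-known types never surface to CEL as raw messages: wrappers collapse to primitives (or NullValue when unset), and Duration/Timestamp become native Go time values. A sketch against TypeDescription.MaybeUnwrap, relying only on the default registrations:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	pbdb := pb.NewDb()
	td, found := pbdb.DescribeType("google.protobuf.Int32Value")
	if !found {
		panic("wrapper types are indexed by default")
	}
	// The Int32 wrapper unwraps to a plain int64 for CEL.
	val, unwrapped, err := td.MaybeUnwrap(wrapperspb.Int32(42))
	fmt.Printf("%v %v %v %T\n", val, unwrapped, err, val) // 42 true <nil> int64
}
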
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/provider.go b/hack/tools/vendor/github.com/google/cel-go/common/types/provider.go
new file mode 100644
index 0000000000..936a4e28b2
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/provider.go
@@ -0,0 +1,766 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Adapter converts native Go values of varying type and complexity to equivalent CEL values.
+type Adapter = ref.TypeAdapter
+
+// Provider specifies functions for creating new object instances and for resolving
+// enum values by name.
+type Provider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) ref.Val
+
+ // FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+ FindIdent(identName string) (ref.Val, bool)
+
+ // FindStructType returns the Type given a qualified type name.
+ //
+ // For historical reasons, only struct types are expected to be returned through this
+ // method, and the type values are expected to be wrapped in a TypeType instance using
+ // TypeTypeWithParam().
+ //
+ // Returns false if not found.
+ FindStructType(structType string) (*Type, bool)
+
+ // FindStructFieldNames returns the field names associated with the type, if the type
+ // is found.
+ FindStructFieldNames(structType string) ([]string, bool)
+
+ // FindStructFieldType returns the field type for a checked type value. Returns
+ // false if the field could not be found.
+ FindStructFieldType(structType, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and map of field
+ // name to value.
+ //
+ // Note, for each value, the Val.ConvertToNative function will be invoked
+ // to convert the Val to the field's native type. If an error occurs during
+ // conversion, the NewValue will be a types.Err.
+ NewValue(structType string, fields map[string]ref.Val) ref.Val
+}
+
+// FieldType represents a field's type value and whether that field supports presence detection.
+type FieldType struct {
+ // Type of the field as a CEL native type value.
+ Type *Type
+
+ // IsSet indicates whether the field is set on an input object.
+ IsSet ref.FieldTester
+
+ // GetFrom retrieves the field value on the input object, if set.
+ GetFrom ref.FieldGetter
+}
+
+// Registry provides type information for a set of registered types.
+type Registry struct {
+ revTypeMap map[string]*Type
+ pbdb *pb.Db
+}
+
+// NewRegistry accepts a list of proto message instances and returns a type
+// provider which can create new instances of the provided message or any
+// message that proto depends upon in its FileDescriptor.
+func NewRegistry(types ...proto.Message) (*Registry, error) {
+ p := &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: pb.NewDb(),
+ }
+ err := p.RegisterType(
+ BoolType,
+ BytesType,
+ DoubleType,
+ DurationType,
+ IntType,
+ ListType,
+ MapType,
+ NullType,
+ StringType,
+ TimestampType,
+ TypeType,
+ UintType)
+ if err != nil {
+ return nil, err
+ }
+ // This block ensures that the well-known protobuf types are registered by default.
+ for _, fd := range p.pbdb.FileDescriptions() {
+ err = p.registerAllTypes(fd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, msgType := range types {
+ err = p.RegisterMessage(msgType)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return p, nil
+}
+
+// NewEmptyRegistry returns a registry which is completely unconfigured.
+func NewEmptyRegistry() *Registry {
+ return &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: pb.NewDb(),
+ }
+}
+
+// Copy copies the current state of the registry into its own memory space.
+func (p *Registry) Copy() *Registry {
+ copy := &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: p.pbdb.Copy(),
+ }
+ for k, v := range p.revTypeMap {
+ copy.revTypeMap[k] = v
+ }
+ return copy
+}
+
+// EnumValue returns the numeric value of the given enum value name.
+func (p *Registry) EnumValue(enumName string) ref.Val {
+ enumVal, found := p.pbdb.DescribeEnum(enumName)
+ if !found {
+ return NewErr("unknown enum name '%s'", enumName)
+ }
+ return Int(enumVal.Value())
+}
+
+// FindFieldType returns the field type for a checked type value. Returns false if
+// the field could not be found.
+//
+// Deprecated: use FindStructFieldType
+func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return nil, false
+ }
+ field, found := msgType.FieldByName(fieldName)
+ if !found {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: field.CheckedType(),
+ IsSet: field.IsSet,
+ GetFrom: field.GetFrom}, true
+}
+
+// FindStructFieldNames returns the set of field names for the given struct type,
+// if the type exists in the registry.
+func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return []string{}, false
+ }
+ fieldMap := msgType.FieldMap()
+ fields := make([]string, len(fieldMap))
+ idx := 0
+ for f := range fieldMap {
+ fields[idx] = f
+ idx++
+ }
+ return fields, true
+}
+
+// FindStructFieldType returns the field type for a checked type value. Returns
+// false if the field could not be found.
+func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return nil, false
+ }
+ field, found := msgType.FieldByName(fieldName)
+ if !found {
+ return nil, false
+ }
+ return &FieldType{
+ Type: fieldDescToCELType(field),
+ IsSet: field.IsSet,
+ GetFrom: field.GetFrom}, true
+}
+
+// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+func (p *Registry) FindIdent(identName string) (ref.Val, bool) {
+ if t, found := p.revTypeMap[identName]; found {
+ return t, true
+ }
+ if enumVal, found := p.pbdb.DescribeEnum(identName); found {
+ return Int(enumVal.Value()), true
+ }
+ return nil, false
+}
+
+// FindType looks up the Type given a qualified typeName. Returns false if not found.
+//
+// Deprecated: use FindStructType
+func (p *Registry) FindType(structType string) (*exprpb.Type, bool) {
+ if _, found := p.pbdb.DescribeType(structType); !found {
+ return nil, false
+ }
+ if structType != "" && structType[0] == '.' {
+ structType = structType[1:]
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: structType}}}}, true
+}
+
+// FindStructType returns the Type given a qualified type name.
+//
+// For historical reasons, only struct types are expected to be returned through this
+// method, and the type values are expected to be wrapped in a TypeType instance using
+// TypeTypeWithParam().
+//
+// Returns false if not found.
+func (p *Registry) FindStructType(structType string) (*Type, bool) {
+ if _, found := p.pbdb.DescribeType(structType); !found {
+ return nil, false
+ }
+ if structType != "" && structType[0] == '.' {
+ structType = structType[1:]
+ }
+ return NewTypeTypeWithParam(NewObjectType(structType)), true
+}
+
+// NewValue creates a new type value from a qualified name and map of field
+// name to value.
+//
+// Note, for each value, the Val.ConvertToNative function will be invoked
+// to convert the Val to the field's native type. If an error occurs during
+// conversion, the NewValue will be a types.Err.
+func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val {
+ td, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return NewErr("unknown type '%s'", structType)
+ }
+ msg := td.New()
+ fieldMap := td.FieldMap()
+ for name, value := range fields {
+ field, found := fieldMap[name]
+ if !found {
+ return NewErr("no such field: %s", name)
+ }
+ err := msgSetField(msg, field, value)
+ if err != nil {
+ return &Err{error: err}
+ }
+ }
+ return p.NativeToValue(msg.Interface())
+}
+
+// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error {
+ fd, err := p.pbdb.RegisterDescriptor(fileDesc)
+ if err != nil {
+ return err
+ }
+ return p.registerAllTypes(fd)
+}
+
+// RegisterMessage registers a protocol buffer message and its dependencies.
+func (p *Registry) RegisterMessage(message proto.Message) error {
+ fd, err := p.pbdb.RegisterMessage(message)
+ if err != nil {
+ return err
+ }
+ return p.registerAllTypes(fd)
+}
+
+// RegisterType registers a type value with the provider which ensures the provider is aware of how to
+// map the type to an identifier.
+//
+// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name.
+// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance is generated which
+// reflects the traits present on the input and the runtime type name. By default this foreign type will
+// be treated as a types.StructKind. To avoid potential issues where the `ref.Type` value does not match
+// the generated `*types.Type` instance, consider always using the `*types.Type` to represent type extensions
+// to CEL, even when they're not based on protobuf types.
+func (p *Registry) RegisterType(types ...ref.Type) error {
+ for _, t := range types {
+ celType := maybeForeignType(t)
+ existing, found := p.revTypeMap[t.TypeName()]
+ if !found {
+ p.revTypeMap[t.TypeName()] = celType
+ continue
+ }
+ if !existing.IsEquivalentType(celType) {
+ return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType)
+ }
+ if existing.traitMask != celType.traitMask {
+ return fmt.Errorf(
+ "type registered with conflicting traits: %v with traits %v, input: %v",
+ existing.TypeName(), existing.traitMask, celType.traitMask)
+ }
+ }
+ return nil
+}
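+
+// Illustrative sketch (not part of the upstream source): per the guidance above,
+// prefer registering a *types.Type directly. The type name is a hypothetical
+// placeholder:
+//
+//	reg := NewEmptyRegistry()
+//	if err := reg.RegisterType(NewObjectTypeValue("my.custom.Type")); err != nil {
+//		// duplicate registrations with equivalent traits are tolerated;
+//		// conflicting traits or definitions surface here as an error
+//	}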
+
+// NativeToValue converts various "native" types to ref.Val with this specific implementation
+// providing support for custom proto-based types.
+//
+// This method should be the inverse of ref.Val.ConvertToNative.
+func (p *Registry) NativeToValue(value any) ref.Val {
+ if val, found := nativeToValue(p, value); found {
+ return val
+ }
+ switch v := value.(type) {
+ case proto.Message:
+ typeName := string(v.ProtoReflect().Descriptor().FullName())
+ td, found := p.pbdb.DescribeType(typeName)
+ if !found {
+ return NewErr("unknown type: '%s'", typeName)
+ }
+ unwrapped, isUnwrapped, err := td.MaybeUnwrap(v)
+ if err != nil {
+ return UnsupportedRefValConversionErr(v)
+ }
+ if isUnwrapped {
+ return p.NativeToValue(unwrapped)
+ }
+ typeVal, found := p.FindIdent(typeName)
+ if !found {
+ return NewErr("unknown type: '%s'", typeName)
+ }
+ return NewObject(p, td, typeVal, v)
+ case *pb.Map:
+ return NewProtoMap(p, v)
+ case protoreflect.List:
+ return NewProtoList(p, v)
+ case protoreflect.Message:
+ return p.NativeToValue(v.Interface())
+ case protoreflect.Value:
+ return p.NativeToValue(v.Interface())
+ }
+ return UnsupportedRefValConversionErr(value)
+}
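+
+// Illustrative sketch (not part of the upstream source):
+//
+//	reg := NewEmptyRegistry()
+//	reg.NativeToValue(int32(7))                    // Int(7)
+//	reg.NativeToValue(map[string]string{"k": "v"}) // a traits.Mapper
+//	reg.NativeToValue(struct{}{})                  // unsupported conversion error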
+
+func (p *Registry) registerAllTypes(fd *pb.FileDescription) error {
+ for _, typeName := range fd.GetTypeNames() {
+ // skip well-known type names since they're automatically sanitized
+ // during NewObjectType() calls.
+ if _, found := checkedWellKnowns[typeName]; found {
+ continue
+ }
+ err := p.RegisterType(NewObjectTypeValue(typeName))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func fieldDescToCELType(field *pb.FieldDescription) *Type {
+ if field.IsMap() {
+ return NewMapType(
+ singularFieldDescToCELType(field.KeyType),
+ singularFieldDescToCELType(field.ValueType))
+ }
+ if field.IsList() {
+ return NewListType(singularFieldDescToCELType(field))
+ }
+ return singularFieldDescToCELType(field)
+}
+
+func singularFieldDescToCELType(field *pb.FieldDescription) *Type {
+ if field.IsMessage() {
+ return NewObjectType(string(field.Descriptor().Message().FullName()))
+ }
+ if field.IsEnum() {
+ return IntType
+ }
+ return ProtoCELPrimitives[field.ProtoKind()]
+}
+
+// defaultTypeAdapter converts go native types to CEL values.
+type defaultTypeAdapter struct{}
+
+var (
+ // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values.
+ DefaultTypeAdapter = &defaultTypeAdapter{}
+)
+
+// NativeToValue implements the ref.TypeAdapter interface.
+func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
+ if val, found := nativeToValue(a, value); found {
+ return val
+ }
+ return UnsupportedRefValConversionErr(value)
+}
+
+// nativeToValue returns the converted (ref.Val, true) if a conversion is found,
+// otherwise (nil, false).
+func nativeToValue(a Adapter, value any) (ref.Val, bool) {
+ switch v := value.(type) {
+ case nil:
+ return NullValue, true
+ case *Bool:
+ if v != nil {
+ return *v, true
+ }
+ case *Bytes:
+ if v != nil {
+ return *v, true
+ }
+ case *Double:
+ if v != nil {
+ return *v, true
+ }
+ case *Int:
+ if v != nil {
+ return *v, true
+ }
+ case *String:
+ if v != nil {
+ return *v, true
+ }
+ case *Uint:
+ if v != nil {
+ return *v, true
+ }
+ case bool:
+ return Bool(v), true
+ case int:
+ return Int(v), true
+ case int32:
+ return Int(v), true
+ case int64:
+ return Int(v), true
+ case uint:
+ return Uint(v), true
+ case uint32:
+ return Uint(v), true
+ case uint64:
+ return Uint(v), true
+ case float32:
+ return Double(v), true
+ case float64:
+ return Double(v), true
+ case string:
+ return String(v), true
+ case *dpb.Duration:
+ return Duration{Duration: v.AsDuration()}, true
+ case time.Duration:
+ return Duration{Duration: v}, true
+ case *tpb.Timestamp:
+ return Timestamp{Time: v.AsTime()}, true
+ case time.Time:
+ return Timestamp{Time: v}, true
+ case *bool:
+ if v != nil {
+ return Bool(*v), true
+ }
+ case *float32:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *float64:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *int:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int32:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int64:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *string:
+ if v != nil {
+ return String(*v), true
+ }
+ case *uint:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint32:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint64:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case []byte:
+ return Bytes(v), true
+ // specializations for common list types.
+ case []string:
+ return NewStringList(a, v), true
+ case []ref.Val:
+ return NewRefValList(a, v), true
+ // specializations for common map types.
+ case map[string]string:
+ return NewStringStringMap(a, v), true
+ case map[string]any:
+ return NewStringInterfaceMap(a, v), true
+ case map[ref.Val]ref.Val:
+ return NewRefValMap(a, v), true
+ // additional specializations may be added upon request / need.
+ case *anypb.Any:
+ if v == nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ unpackedAny, err := v.UnmarshalNew()
+ if err != nil {
+ return NewErr("anypb.UnmarshalNew() failed for type %q: %v", v.GetTypeUrl(), err), true
+ }
+ return a.NativeToValue(unpackedAny), true
+ case *structpb.NullValue, structpb.NullValue:
+ return NullValue, true
+ case *structpb.ListValue:
+ return NewJSONList(a, v), true
+ case *structpb.Struct:
+ return NewJSONStruct(a, v), true
+ case ref.Val:
+ return v, true
+ case protoreflect.EnumNumber:
+ return Int(v), true
+ case proto.Message:
+ if v == nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ typeName := string(v.ProtoReflect().Descriptor().FullName())
+ td, found := pb.DefaultDb.DescribeType(typeName)
+ if !found {
+ return nil, false
+ }
+ val, unwrapped, err := td.MaybeUnwrap(v)
+ if err != nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ if !unwrapped {
+ return nil, false
+ }
+ return a.NativeToValue(val), true
+ // Note: dynamicpb.Message implements the proto.Message _and_ protoreflect.Message interfaces
+ // which means that this case must appear after handling a proto.Message type.
+ case protoreflect.Message:
+ return a.NativeToValue(v.Interface()), true
+ default:
+ refValue := reflect.ValueOf(v)
+ if refValue.Kind() == reflect.Ptr {
+ if refValue.IsNil() {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ refValue = refValue.Elem()
+ }
+ refKind := refValue.Kind()
+ switch refKind {
+ case reflect.Array, reflect.Slice:
+ if refValue.Type().Elem() == reflect.TypeOf(byte(0)) {
+ if refValue.CanAddr() {
+ return Bytes(refValue.Bytes()), true
+ }
+ tmp := reflect.New(refValue.Type())
+ tmp.Elem().Set(refValue)
+ return Bytes(tmp.Elem().Bytes()), true
+ }
+ return NewDynamicList(a, v), true
+ case reflect.Map:
+ return NewDynamicMap(a, v), true
+ // type aliases of primitive types cannot be asserted as that type, but rather need
+ // to be converted to the underlying primitive type before being converted to a CEL
+ // representation.
+ case reflect.Bool:
+ boolType := reflect.TypeOf(false)
+ return Bool(refValue.Convert(boolType).Interface().(bool)), true
+ case reflect.Int:
+ intType := reflect.TypeOf(int(0))
+ return Int(refValue.Convert(intType).Interface().(int)), true
+ case reflect.Int8:
+ intType := reflect.TypeOf(int8(0))
+ return Int(refValue.Convert(intType).Interface().(int8)), true
+ case reflect.Int16:
+ intType := reflect.TypeOf(int16(0))
+ return Int(refValue.Convert(intType).Interface().(int16)), true
+ case reflect.Int32:
+ intType := reflect.TypeOf(int32(0))
+ return Int(refValue.Convert(intType).Interface().(int32)), true
+ case reflect.Int64:
+ intType := reflect.TypeOf(int64(0))
+ return Int(refValue.Convert(intType).Interface().(int64)), true
+ case reflect.Uint:
+ uintType := reflect.TypeOf(uint(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint)), true
+ case reflect.Uint8:
+ uintType := reflect.TypeOf(uint8(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint8)), true
+ case reflect.Uint16:
+ uintType := reflect.TypeOf(uint16(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint16)), true
+ case reflect.Uint32:
+ uintType := reflect.TypeOf(uint32(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint32)), true
+ case reflect.Uint64:
+ uintType := reflect.TypeOf(uint64(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint64)), true
+ case reflect.Float32:
+ doubleType := reflect.TypeOf(float32(0))
+ return Double(refValue.Convert(doubleType).Interface().(float32)), true
+ case reflect.Float64:
+ doubleType := reflect.TypeOf(float64(0))
+ return Double(refValue.Convert(doubleType).Interface().(float64)), true
+ case reflect.String:
+ stringType := reflect.TypeOf("")
+ return String(refValue.Convert(stringType).Interface().(string)), true
+ }
+ }
+ return nil, false
+}
+
+func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val ref.Val) error {
+ if field.IsList() {
+ lv := target.NewField(field.Descriptor())
+ list, ok := val.(traits.Lister)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetListField(lv.List(), field, list)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), lv)
+ return nil
+ }
+ if field.IsMap() {
+ mv := target.NewField(field.Descriptor())
+ mp, ok := val.(traits.Mapper)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetMapField(mv.Map(), field, mp)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), mv)
+ return nil
+ }
+ v, err := val.ConvertToNative(field.ReflectType())
+ if err != nil {
+ return fieldTypeConversionError(field, err)
+ }
+ if v == nil {
+ return nil
+ }
+ switch pv := v.(type) {
+ case proto.Message:
+ v = pv.ProtoReflect()
+ }
+ target.Set(field.Descriptor(), protoreflect.ValueOf(v))
+ return nil
+}
+
+func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, listVal traits.Lister) error {
+ elemReflectType := listField.ReflectType().Elem()
+ for i := Int(0); i < listVal.Size().(Int); i++ {
+ elem := listVal.Get(i)
+ elemVal, err := elem.ConvertToNative(elemReflectType)
+ if err != nil {
+ return fieldTypeConversionError(listField, err)
+ }
+ if elemVal == nil {
+ continue
+ }
+ switch ev := elemVal.(type) {
+ case proto.Message:
+ elemVal = ev.ProtoReflect()
+ }
+ target.Append(protoreflect.ValueOf(elemVal))
+ }
+ return nil
+}
+
+func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapVal traits.Mapper) error {
+ targetKeyType := mapField.KeyType.ReflectType()
+ targetValType := mapField.ValueType.ReflectType()
+ it := mapVal.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ val := mapVal.Get(key)
+ k, err := key.ConvertToNative(targetKeyType)
+ if err != nil {
+ return fieldTypeConversionError(mapField, err)
+ }
+ v, err := val.ConvertToNative(targetValType)
+ if err != nil {
+ return fieldTypeConversionError(mapField, err)
+ }
+ if v == nil {
+ continue
+ }
+ switch pv := v.(type) {
+ case proto.Message:
+ v = pv.ProtoReflect()
+ }
+ target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v))
+ }
+ return nil
+}
+
+func unsupportedTypeConversionError(field *pb.FieldDescription, val ref.Val) error {
+ msgName := field.Descriptor().ContainingMessage().FullName()
+ return fmt.Errorf("unsupported field type for %v.%v: %v", msgName, field.Name(), val.Type())
+}
+
+func fieldTypeConversionError(field *pb.FieldDescription, err error) error {
+ msgName := field.Descriptor().ContainingMessage().FullName()
+ return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err)
+}
+
+var (
+ // ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type.
+ ProtoCELPrimitives = map[protoreflect.Kind]*Type{
+ protoreflect.BoolKind: BoolType,
+ protoreflect.BytesKind: BytesType,
+ protoreflect.DoubleKind: DoubleType,
+ protoreflect.FloatKind: DoubleType,
+ protoreflect.Int32Kind: IntType,
+ protoreflect.Int64Kind: IntType,
+ protoreflect.Sint32Kind: IntType,
+ protoreflect.Sint64Kind: IntType,
+ protoreflect.Uint32Kind: UintType,
+ protoreflect.Uint64Kind: UintType,
+ protoreflect.Fixed32Kind: UintType,
+ protoreflect.Fixed64Kind: UintType,
+ protoreflect.Sfixed32Kind: IntType,
+ protoreflect.Sfixed64Kind: IntType,
+ protoreflect.StringKind: StringType,
+ }
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
new file mode 100644
index 0000000000..79330c3321
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "provider.go",
+ "reference.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/ref",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/ref/provider.go b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/provider.go
new file mode 100644
index 0000000000..b9820023d6
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/provider.go
@@ -0,0 +1,102 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ref
+
+import (
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// TypeProvider specifies functions for creating new object instances and for
+// resolving enum values by name.
+//
+// Deprecated: use types.Provider
+type TypeProvider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) Val
+
+ // FindIdent takes a qualified identifier name and returns a Value if one exists.
+ FindIdent(identName string) (Val, bool)
+
+ // FindType looks up the Type given a qualified typeName. Returns false if not found.
+ FindType(typeName string) (*exprpb.Type, bool)
+
+ // FindFieldType returns the field type for a checked type value. Returns false if
+ // the field could not be found.
+ FindFieldType(messageType, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and map of field name
+ // to value.
+ //
+ // Note, for each value, the Val.ConvertToNative function will be invoked to convert
+ // the Val to the field's native type. If an error occurs during conversion, the
+ // value returned by NewValue will be a types.Err.
+ NewValue(typeName string, fields map[string]Val) Val
+}
+
+// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
+//
+// Deprecated: use types.Adapter
+type TypeAdapter interface {
+ // NativeToValue converts the input `value` to a CEL `ref.Val`.
+ NativeToValue(value any) Val
+}
+
+// TypeRegistry allows third parties to add custom types to CEL. Not all `TypeProvider`
+// implementations support type-customization, so these features are optional. However, a
+// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
+// which are registered can be converted to CEL representations.
+//
+// Deprecated: use types.Registry
+type TypeRegistry interface {
+ TypeAdapter
+ TypeProvider
+
+ // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+ RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error
+
+ // RegisterMessage registers a protocol buffer message and its dependencies.
+ RegisterMessage(message proto.Message) error
+
+ // RegisterType registers a type value with the provider which ensures the
+ // provider is aware of how to map the type to an identifier.
+ //
+ // If a type is provided more than once with an alternative definition, the
+ // call will result in an error.
+ RegisterType(types ...Type) error
+}
+
+// FieldType represents a field's type value and whether that field supports
+// presence detection.
+//
+// Deprecated: use types.FieldType
+type FieldType struct {
+ // Type of the field as a protobuf type value.
+ Type *exprpb.Type
+
+ // IsSet indicates whether the field is set on an input object.
+ IsSet FieldTester
+
+ // GetFrom retrieves the field value on the input object, if set.
+ GetFrom FieldGetter
+}
+
+// FieldTester is used to test field presence on an input object.
+type FieldTester func(target any) bool
+
+// FieldGetter is used to get the field value from an input object, if set.
+type FieldGetter func(target any) (any, error)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/ref/reference.go b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/reference.go
new file mode 100644
index 0000000000..e0d58145cd
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -0,0 +1,63 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ref contains the reference interfaces used throughout the types components.
+package ref
+
+import (
+ "reflect"
+)
+
+// Type interface indicates the name of a given type.
+type Type interface {
+ // HasTrait returns whether the type has a given trait associated with it.
+ //
+ // See common/types/traits/traits.go for a list of supported traits.
+ HasTrait(trait int) bool
+
+ // TypeName returns the qualified type name of the type.
+ //
+ // The type name is also used as the type's identifier name at type-check and interpretation time.
+ TypeName() string
+}
+
+// Val interface defines the functions supported by all expression values.
+// Val implementations may specialize the behavior of the value through the addition of traits.
+type Val interface {
+ // ConvertToNative converts the Value to a native Go struct according to the
+ // reflected type description, or error if the conversion is not feasible.
+ //
+ // The ConvertToNative method is intended to be used to support conversion between CEL types
+ // and native types during object creation expressions or by clients who need to adapt the
+ // returned CEL value into an equivalent Go value instance.
+ //
+ // When implementing or using ConvertToNative, the following guidelines apply:
+ // - Use ConvertToNative when marshalling CEL evaluation results to native types.
+ // - Do not use ConvertToNative within CEL extension functions.
+ // - Document whether your implementation supports non-CEL field types, such as Go or Protobuf.
+ ConvertToNative(typeDesc reflect.Type) (any, error)
+
+ // ConvertToType supports type conversions between CEL value types supported by the expression language.
+ ConvertToType(typeValue Type) Val
+
+ // Equal returns true if the `other` value has the same type and content as the implementing struct.
+ Equal(other Val) Val
+
+ // Type returns the TypeValue of the value.
+ Type() Type
+
+ // Value returns the raw value of the instance which may not be directly compatible with the expression
+ // language types.
+ Value() any
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/string.go b/hack/tools/vendor/github.com/google/cel-go/common/types/string.go
new file mode 100644
index 0000000000..3a93743f24
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/string.go
@@ -0,0 +1,226 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String type implementation which supports addition, comparison, matching,
+// and size functions.
+type String string
+
+var (
+ stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{
+ overloads.Contains: StringContains,
+ overloads.EndsWith: StringEndsWith,
+ overloads.StartsWith: StringStartsWith,
+ }
+
+ stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{})
+)
+
+// Add implements traits.Adder.Add.
+func (s String) Add(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return s + otherString
+}
+
+// Compare implements traits.Comparer.Compare.
+func (s String) Compare(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return Int(strings.Compare(s.Value().(string), otherString.Value().(string)))
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.String:
+ return reflect.ValueOf(s).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.String(string(s)))
+ case jsonValueType:
+ // Convert to a protobuf representation of a JSON String.
+ return structpb.NewStringValue(string(s)), nil
+ case stringWrapperType:
+ // Convert to a wrapperspb.StringValue.
+ return wrapperspb.String(string(s)), nil
+ }
+ if typeDesc.Elem().Kind() == reflect.String {
+ p := s.Value().(string)
+ return &p, nil
+ }
+ case reflect.Interface:
+ sv := s.Value()
+ if reflect.TypeOf(sv).Implements(typeDesc) {
+ return sv, nil
+ }
+ if reflect.TypeOf(s).Implements(typeDesc) {
+ return s, nil
+ }
+ }
+ return nil, fmt.Errorf(
+ "unsupported native conversion from string to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (s String) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil {
+ return Int(n)
+ }
+ case UintType:
+ if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil {
+ return Uint(n)
+ }
+ case DoubleType:
+ if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil {
+ return Double(n)
+ }
+ case BoolType:
+ if b, err := strconv.ParseBool(s.Value().(string)); err == nil {
+ return Bool(b)
+ }
+ case BytesType:
+ return Bytes(s)
+ case DurationType:
+ if d, err := time.ParseDuration(s.Value().(string)); err == nil {
+ return durationOf(d)
+ }
+ case TimestampType:
+ if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil {
+ if t.Unix() < minUnixTime || t.Unix() > maxUnixTime {
+ return celErrTimestampOverflow
+ }
+ return timestampOf(t)
+ }
+ case StringType:
+ return s
+ case TypeType:
+ return StringType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal)
+}
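+
+// Illustrative sketch (not part of the upstream source): conversions parse the
+// string contents, and unparseable input falls through to a conversion error:
+//
+//	String("42").ConvertToType(IntType)    // Int(42)
+//	String("true").ConvertToType(BoolType) // Bool(true)
+//	String("nope").ConvertToType(IntType)  // Err("type conversion error ...")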
+
+// Equal implements ref.Val.Equal.
+func (s String) Equal(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ return Bool(ok && s == otherString)
+}
+
+// IsZeroValue returns true if the string is empty.
+func (s String) IsZeroValue() bool {
+ return len(s) == 0
+}
+
+// Match implements traits.Matcher.Match.
+func (s String) Match(pattern ref.Val) ref.Val {
+ pat, ok := pattern.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(pattern)
+ }
+ matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string))
+ if err != nil {
+ return &Err{error: err}
+ }
+ return Bool(matched)
+}
+
+// Receive implements traits.Receiver.Receive.
+func (s String) Receive(function string, overload string, args []ref.Val) ref.Val {
+ switch len(args) {
+ case 1:
+ if f, found := stringOneArgOverloads[function]; found {
+ return f(s, args[0])
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Size implements traits.Sizer.Size.
+func (s String) Size() ref.Val {
+ return Int(len([]rune(s.Value().(string))))
+}
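+
+// Editorial note: Size counts runes rather than bytes, so for example
+// String("héllo").Size() is Int(5) even though the UTF-8 encoding is six bytes.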
+
+// Type implements ref.Val.Type.
+func (s String) Type() ref.Type {
+ return StringType
+}
+
+// Value implements ref.Val.Value.
+func (s String) Value() any {
+ return string(s)
+}
+
+// StringContains returns whether the string contains a substring.
+func StringContains(s, sub ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ subStr, ok := sub.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(sub)
+ }
+ return Bool(strings.Contains(string(str), string(subStr)))
+}
+
+// StringEndsWith returns whether the target string ends with the input suffix.
+func StringEndsWith(s, suf ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ sufStr, ok := suf.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(suf)
+ }
+ return Bool(strings.HasSuffix(string(str), string(sufStr)))
+}
+
+// StringStartsWith returns whether the target string contains the input prefix.
+func StringStartsWith(s, pre ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ preStr, ok := pre.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(pre)
+ }
+ return Bool(strings.HasPrefix(string(str), string(preStr)))
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/timestamp.go b/hack/tools/vendor/github.com/google/cel-go/common/types/timestamp.go
new file mode 100644
index 0000000000..33acdea8ef
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/timestamp.go
@@ -0,0 +1,311 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Timestamp type implementation which supports add, compare, and subtract
+// operations. Timestamps are also capable of participating in dynamic
+// function dispatch to instance methods.
+type Timestamp struct {
+ time.Time
+}
+
+func timestampOf(t time.Time) Timestamp {
+ // Note that this function does not validate that time.Time is in our supported range.
+ return Timestamp{Time: t}
+}
+
+const (
+ // The number of seconds between year 1 and year 1970. This is borrowed from
+ // https://golang.org/src/time/time.go.
+ unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * (60 * 60 * 24)
+
+ // Number of seconds between `0001-01-01T00:00:00Z` and the Unix epoch.
+ minUnixTime int64 = -62135596800
+ // Number of seconds between `9999-12-31T23:59:59.999999999Z` and the Unix epoch.
+ maxUnixTime int64 = 253402300799
+)
+
+// Add implements traits.Adder.Add.
+func (t Timestamp) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ return other.(Duration).Add(t)
+ }
+ return MaybeNoSuchOverloadErr(other)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (t Timestamp) Compare(other ref.Val) ref.Val {
+ if TimestampType != other.Type() {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ ts1 := t.Time
+ ts2 := other.(Timestamp).Time
+ switch {
+ case ts1.Before(ts2):
+ return IntNegOne
+ case ts1.After(ts2):
+ return IntOne
+ default:
+ return IntZero
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the timestamp is already assignable to the desired type, return it.
+ if reflect.TypeOf(t.Time).AssignableTo(typeDesc) {
+ return t.Time, nil
+ }
+ if reflect.TypeOf(t).AssignableTo(typeDesc) {
+ return t, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ // Pack the underlying time as a tpb.Timestamp into an Any value.
+ return anypb.New(tpb.New(t.Time))
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON
+ // string.
+ v := t.ConvertToType(StringType)
+ if IsError(v) {
+ return nil, v.(*Err)
+ }
+ return structpb.NewStringValue(string(v.(String))), nil
+ case timestampValueType:
+ // Unwrap the underlying tpb.Timestamp.
+ return tpb.New(t.Time), nil
+ }
+ return nil, fmt.Errorf("type conversion error from 'Timestamp' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(t.Format(time.RFC3339Nano))
+ case IntType:
+ // Return the Unix time in seconds since 1970
+ return Int(t.Unix())
+ case TimestampType:
+ return t
+ case TypeType:
+ return TimestampType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal)
+}
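+
+// Illustrative sketch (not part of the upstream source):
+//
+//	ts := timestampOf(time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC))
+//	ts.ConvertToType(StringType) // String("2009-11-10T23:00:00Z")
+//	ts.ConvertToType(IntType)    // Int(1257894000), seconds since the Unix epoch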
+
+// Equal implements ref.Val.Equal.
+func (t Timestamp) Equal(other ref.Val) ref.Val {
+ otherTime, ok := other.(Timestamp)
+ return Bool(ok && t.Time.Equal(otherTime.Time))
+}
+
+// IsZeroValue returns true if the timestamp is epoch 0.
+func (t Timestamp) IsZeroValue() bool {
+ return t.IsZero()
+}
+
+// Receive implements traits.Receiver.Receive.
+func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
+ switch len(args) {
+ case 0:
+ if f, found := timestampZeroArgOverloads[function]; found {
+ return f(t.Time)
+ }
+ case 1:
+ if f, found := timestampOneArgOverloads[function]; found {
+ return f(t.Time, args[0])
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
+ switch subtrahend.Type() {
+ case DurationType:
+ dur := subtrahend.(Duration)
+ val, err := subtractTimeDurationChecked(t.Time, dur.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return timestampOf(val)
+ case TimestampType:
+ t2 := subtrahend.(Timestamp).Time
+ val, err := subtractTimeChecked(t.Time, t2)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+ }
+ return MaybeNoSuchOverloadErr(subtrahend)
+}
+
+// Type implements ref.Val.Type.
+func (t Timestamp) Type() ref.Type {
+ return TimestampType
+}
+
+// Value implements ref.Val.Value.
+func (t Timestamp) Value() any {
+ return t.Time
+}
+
+var (
+ timestampValueType = reflect.TypeOf(&tpb.Timestamp{})
+
+ timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYear,
+ overloads.TimeGetMonth: timestampGetMonth,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYear,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBased,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeek,
+ overloads.TimeGetHours: timestampGetHours,
+ overloads.TimeGetMinutes: timestampGetMinutes,
+ overloads.TimeGetSeconds: timestampGetSeconds,
+ overloads.TimeGetMilliseconds: timestampGetMilliseconds}
+
+ timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYearWithTz,
+ overloads.TimeGetMonth: timestampGetMonthWithTz,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz,
+ overloads.TimeGetHours: timestampGetHoursWithTz,
+ overloads.TimeGetMinutes: timestampGetMinutesWithTz,
+ overloads.TimeGetSeconds: timestampGetSecondsWithTz,
+ overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz}
+)
+
+type timestampVisitor func(time.Time) ref.Val
+
+func timestampGetFullYear(t time.Time) ref.Val {
+ return Int(t.Year())
+}
+func timestampGetMonth(t time.Time) ref.Val {
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
+ return Int(t.Month() - 1)
+}
+func timestampGetDayOfYear(t time.Time) ref.Val {
+ return Int(t.YearDay() - 1)
+}
+func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val {
+ return Int(t.Day() - 1)
+}
+func timestampGetDayOfMonthOneBased(t time.Time) ref.Val {
+ return Int(t.Day())
+}
+func timestampGetDayOfWeek(t time.Time) ref.Val {
+ return Int(t.Weekday())
+}
+func timestampGetHours(t time.Time) ref.Val {
+ return Int(t.Hour())
+}
+func timestampGetMinutes(t time.Time) ref.Val {
+ return Int(t.Minute())
+}
+func timestampGetSeconds(t time.Time) ref.Val {
+ return Int(t.Second())
+}
+func timestampGetMilliseconds(t time.Time) ref.Val {
+ return Int(t.Nanosecond() / 1000000)
+}
+
+func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetFullYear)(t)
+}
+func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMonth)(t)
+}
+func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfYear)(t)
+}
+func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthZeroBased)(t)
+}
+func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthOneBased)(t)
+}
+func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfWeek)(t)
+}
+func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetHours)(t)
+}
+func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMinutes)(t)
+}
+func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetSeconds)(t)
+}
+func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMilliseconds)(t)
+}
+
+func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
+ return func(t time.Time) ref.Val {
+ if StringType != tz.Type() {
+ return MaybeNoSuchOverloadErr(tz)
+ }
+ val := string(tz.(String))
+ ind := strings.Index(val, ":")
+ if ind == -1 {
+ loc, err := time.LoadLocation(val)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return visitor(t.In(loc))
+ }
+
+ // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
+ // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
+ hr, err := strconv.Atoi(string(val[0:ind]))
+ if err != nil {
+ return WrapErr(err)
+ }
+ min, err := strconv.Atoi(string(val[ind+1:]))
+ if err != nil {
+ return WrapErr(err)
+ }
+ var offset int
+ if string(val[0]) == "-" {
+ offset = hr*60 - min
+ } else {
+ offset = hr*60 + min
+ }
+ secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
+ timezone := time.FixedZone("", secondsEastOfUTC)
+ return visitor(t.In(timezone))
+ }
+}
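+
+// Illustrative sketch (not part of the upstream source): the one-argument
+// accessors accept either an IANA zone name or a fixed offset, where `ts` is
+// any Timestamp value:
+//
+//	ts.Receive("getHours", "", []ref.Val{String("US/Central")}) // IANA zone
+//	ts.Receive("getHours", "", []ref.Val{String("-07:30")})     // fixed UTC offset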
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
new file mode 100644
index 0000000000..b19eb8301e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
@@ -0,0 +1,29 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "comparer.go",
+ "container.go",
+ "field_tester.go",
+ "indexer.go",
+ "iterator.go",
+ "lister.go",
+ "mapper.go",
+ "matcher.go",
+ "math.go",
+ "receiver.go",
+ "sizer.go",
+ "traits.go",
+ "zeroer.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/traits",
+ deps = [
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/comparer.go
new file mode 100644
index 0000000000..b531d9ae2b
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/comparer.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Comparer interface for ordering comparisons between values in order to
+// support '<', '<=', '>=', '>' overloads.
+type Comparer interface {
+ // Compare this value to the input other value, returning an Int:
+ //
+ // this < other -> Int(-1)
+ // this == other -> Int(0)
+ // this > other -> Int(1)
+ //
+ // If the comparison cannot be made or is not supported, an error should
+ // be returned.
+ Compare(other ref.Val) ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/container.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/container.go
new file mode 100644
index 0000000000..cf5c621ae9
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/container.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Container interface which permits containment tests such as 'a in b'.
+type Container interface {
+ // Contains returns true if the value exists within the object.
+ Contains(value ref.Val) ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
new file mode 100644
index 0000000000..816a956523
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
@@ -0,0 +1,30 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// FieldTester indicates if a defined field on an object type is set to a
+// non-default value.
+//
+// For use with the `has()` macro.
+type FieldTester interface {
+ // IsSet returns true if the field is defined and set to a non-default
+ // value. The method will return false if defined and not set, and an error
+ // if the field is not defined.
+ IsSet(field ref.Val) ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/indexer.go
new file mode 100644
index 0000000000..662c6836c3
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/indexer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Indexer permits random access of elements by index 'a[b()]'.
+type Indexer interface {
+ // Get the value at the specified index or error.
+ Get(index ref.Val) ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/iterator.go
new file mode 100644
index 0000000000..91c10f08fc
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/iterator.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Iterable aggregate types permit traversal over their elements.
+type Iterable interface {
+ // Iterator returns a new iterator view of the struct.
+ Iterator() Iterator
+}
+
+// Iterator permits safe traversal over the contents of an aggregate type.
+type Iterator interface {
+ ref.Val
+
+ // HasNext returns true if there are unvisited elements in the Iterator.
+ HasNext() ref.Val
+
+ // Next returns the next element.
+ Next() ref.Val
+}
+
+// Foldable aggregate types support iteration over (key, value) or (index, value) pairs.
+type Foldable interface {
+ // Fold invokes the Folder.FoldEntry for all entries in the type.
+ Fold(Folder)
+}
+
+// Folder performs a fold on a given entry and indicates whether to continue folding.
+type Folder interface {
+ // FoldEntry indicates the key, value pair associated with the entry.
+ // If the output is true, continue folding. Otherwise, terminate the fold.
+ FoldEntry(key, val any) bool
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/lister.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/lister.go
new file mode 100644
index 0000000000..e54781a602
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/lister.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Lister interface which aggregates the traits of a list.
+type Lister interface {
+ ref.Val
+ Adder
+ Container
+ Indexer
+ Iterable
+ Sizer
+}
+
+// MutableLister interface which emits an immutable result after an intermediate computation.
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
+type MutableLister interface {
+ Lister
+ ToImmutableList() Lister
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/mapper.go
new file mode 100644
index 0000000000..d13333f3f6
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/mapper.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Mapper interface which aggregates the traits of a map.
+type Mapper interface {
+ ref.Val
+ Container
+ Indexer
+ Iterable
+ Sizer
+
+ // Find returns a value, if one exists, for the input key.
+ //
+ // If the key is not found the function returns (nil, false).
+ // If the input key is not valid for the map, or is Err or Unknown the function returns
+ // (Unknown|Err, false).
+ Find(key ref.Val) (ref.Val, bool)
+}
+
+// MutableMapper interface which emits an immutable result after an intermediate computation.
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
+type MutableMapper interface {
+ Mapper
+
+ // Insert a key, value pair into the map, returning the map if the insert is successful
+ // and an error if key already exists in the mutable map.
+ Insert(k, v ref.Val) ref.Val
+
+ // ToImmutableMap converts a mutable map into an immutable map.
+ ToImmutableMap() Mapper
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/matcher.go
new file mode 100644
index 0000000000..085dc94ff4
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/matcher.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Matcher interface for supporting 'matches()' overloads.
+type Matcher interface {
+ // Match returns true if the pattern matches the current value.
+ Match(pattern ref.Val) ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/math.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/math.go
new file mode 100644
index 0000000000..86d5b9137e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/math.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Adder interface to support '+' operator overloads.
+type Adder interface {
+ // Add returns a combination of the current value and other value.
+ //
+ // If the other value is an unsupported type, an error is returned.
+ Add(other ref.Val) ref.Val
+}
+
+// Divider interface to support '/' operator overloads.
+type Divider interface {
+ // Divide returns the result of dividing the current value by the input
+ // denominator.
+ //
+ // A denominator value of zero results in an error.
+ Divide(denominator ref.Val) ref.Val
+}
+
+// Modder interface to support '%' operator overloads.
+type Modder interface {
+ // Modulo returns the result of taking the modulus of the current value
+ // by the denominator.
+ //
+ // A denominator value of zero results in an error.
+ Modulo(denominator ref.Val) ref.Val
+}
+
+// Multiplier interface to support '*' operator overloads.
+type Multiplier interface {
+ // Multiply returns the result of multiplying the current and input value.
+ Multiply(other ref.Val) ref.Val
+}
+
+// Negater interface to support unary '-' and '!' operator overloads.
+type Negater interface {
+ // Negate returns the complement of the current value.
+ Negate() ref.Val
+}
+
+// Subtractor interface to support binary '-' operator overloads.
+type Subtractor interface {
+ // Subtract returns the result of subtracting the input from the current
+ // value.
+ Subtract(subtrahend ref.Val) ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/receiver.go
new file mode 100644
index 0000000000..8f41db45e8
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/receiver.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Receiver interface for routing instance method calls within a value.
+type Receiver interface {
+ // Receive accepts a function name, overload id, and arguments and returns
+ // a value.
+ Receive(function string, overload string, args []ref.Val) ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/sizer.go
new file mode 100644
index 0000000000..b80d25137a
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/sizer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Sizer interface for supporting 'size()' overloads.
+type Sizer interface {
+ // Size returns the number of elements or length of the value.
+ Size() ref.Val
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/traits.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/traits.go
new file mode 100644
index 0000000000..51a09df564
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/traits.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package traits defines interfaces that a type may implement to participate
+// in operator overloads and function dispatch.
+package traits
+
+const (
+ // AdderType types provide a '+' operator overload.
+ AdderType = 1 << iota
+
+ // ComparerType types support ordering comparisons '<', '<=', '>', '>='.
+ ComparerType
+
+ // ContainerType types support 'in' operations.
+ ContainerType
+
+ // DividerType types support '/' operations.
+ DividerType
+
+ // FieldTesterType types support the detection of field value presence.
+ FieldTesterType
+
+ // IndexerType types support index access with dynamic values.
+ IndexerType
+
+ // IterableType types can be iterated over in comprehensions.
+ IterableType
+
+ // IteratorType types support iterator semantics.
+ IteratorType
+
+ // MatcherType types support pattern matching via 'matches' method.
+ MatcherType
+
+ // ModderType types support modulus operations '%'.
+ ModderType
+
+ // MultiplierType types support '*' operations.
+ MultiplierType
+
+ // NegatorType types support either negation via '!' or '-'.
+ NegatorType
+
+ // ReceiverType types support dynamic dispatch to instance methods.
+ ReceiverType
+
+ // SizerType types support the size() method.
+ SizerType
+
+ // SubtractorType types support '-' operations.
+ SubtractorType
+
+ // FoldableType types support comprehensions v2 macros which iterate over (key, value) pairs.
+ FoldableType
+)
+
+const (
+ // ListerType supports a set of traits necessary for list operations.
+ //
+ // The ListerType is syntactic sugar and not intended to be a perfect reflection of all List operators.
+ ListerType = AdderType | ContainerType | IndexerType | IterableType | SizerType
+
+ // MapperType supports a set of traits necessary for map operations.
+ //
+ // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators.
+ MapperType = ContainerType | IndexerType | IterableType | SizerType
+)
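+
+// As an illustrative sketch (not part of the API), a runtime value which can
+// report its size and be iterated, but not indexed, would advertise:
+//
+//	mask := SizerType | IterableType
+//	mask&SizerType == SizerType     // true
+//	mask&IndexerType == IndexerType // false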
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/zeroer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
new file mode 100644
index 0000000000..0b7c830a24
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
@@ -0,0 +1,21 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+// Zeroer interface for testing whether a CEL value is a zero value for its type.
+type Zeroer interface {
+ // IsZeroValue indicates whether the object is the zero value for the type.
+ IsZeroValue() bool
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/types.go b/hack/tools/vendor/github.com/google/cel-go/common/types/types.go
new file mode 100644
index 0000000000..1c5b6c40c3
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/types.go
@@ -0,0 +1,864 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "google.golang.org/protobuf/proto"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind which is used to differentiate quickly between simple
+// and complex types.
+type Kind uint
+
+const (
+ // UnspecifiedKind is returned when the type is nil or its kind is not specified.
+ UnspecifiedKind Kind = iota
+
+ // DynKind represents a dynamic type. This kind only exists at type-check time.
+ DynKind
+
+ // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+ // Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf
+ // well-known types.
+ AnyKind
+
+ // BoolKind represents a boolean type.
+ BoolKind
+
+ // BytesKind represents a bytes type.
+ BytesKind
+
+ // DoubleKind represents a double type.
+ DoubleKind
+
+ // DurationKind represents a CEL duration type.
+ DurationKind
+
+ // ErrorKind represents a CEL error type.
+ ErrorKind
+
+ // IntKind represents an integer type.
+ IntKind
+
+ // ListKind represents a list type.
+ ListKind
+
+ // MapKind represents a map type.
+ MapKind
+
+ // NullTypeKind represents a null type.
+ NullTypeKind
+
+ // OpaqueKind represents an abstract type which has no accessible fields.
+ OpaqueKind
+
+ // StringKind represents a string type.
+ StringKind
+
+ // StructKind represents a structured object with typed fields.
+ StructKind
+
+ // TimestampKind represents a CEL time type.
+ TimestampKind
+
+ // TypeKind represents the CEL type.
+ TypeKind
+
+ // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+ TypeParamKind
+
+ // UintKind represents a uint type.
+ UintKind
+
+ // UnknownKind represents an unknown value type.
+ UnknownKind
+)
+
+var (
+ // AnyType represents the google.protobuf.Any type.
+ AnyType = &Type{
+ kind: AnyKind,
+ runtimeTypeName: "google.protobuf.Any",
+ traitMask: traits.FieldTesterType |
+ traits.IndexerType,
+ }
+ // BoolType represents the bool type.
+ BoolType = &Type{
+ kind: BoolKind,
+ runtimeTypeName: "bool",
+ traitMask: traits.ComparerType |
+ traits.NegatorType,
+ }
+ // BytesType represents the bytes type.
+ BytesType = &Type{
+ kind: BytesKind,
+ runtimeTypeName: "bytes",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.SizerType,
+ }
+ // DoubleType represents the double type.
+ DoubleType = &Type{
+ kind: DoubleKind,
+ runtimeTypeName: "double",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.MultiplierType |
+ traits.NegatorType |
+ traits.SubtractorType,
+ }
+ // DurationType represents the CEL duration type.
+ DurationType = &Type{
+ kind: DurationKind,
+ runtimeTypeName: "google.protobuf.Duration",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.NegatorType |
+ traits.ReceiverType |
+ traits.SubtractorType,
+ }
+ // DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+ DynType = &Type{
+ kind: DynKind,
+ runtimeTypeName: "dyn",
+ }
+ // ErrorType represents a CEL error value.
+ ErrorType = &Type{
+ kind: ErrorKind,
+ runtimeTypeName: "error",
+ }
+ // IntType represents the int type.
+ IntType = &Type{
+ kind: IntKind,
+ runtimeTypeName: "int",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.ModderType |
+ traits.MultiplierType |
+ traits.NegatorType |
+ traits.SubtractorType,
+ }
+ // ListType represents the runtime list type.
+ ListType = NewListType(nil)
+ // MapType represents the runtime map type.
+ MapType = NewMapType(nil, nil)
+ // NullType represents the type of a null value.
+ NullType = &Type{
+ kind: NullTypeKind,
+ runtimeTypeName: "null_type",
+ }
+ // StringType represents the string type.
+ StringType = &Type{
+ kind: StringKind,
+ runtimeTypeName: "string",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.MatcherType |
+ traits.ReceiverType |
+ traits.SizerType,
+ }
+ // TimestampType represents the time type.
+ TimestampType = &Type{
+ kind: TimestampKind,
+ runtimeTypeName: "google.protobuf.Timestamp",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.ReceiverType |
+ traits.SubtractorType,
+ }
+ // TypeType represents a CEL type.
+ TypeType = &Type{
+ kind: TypeKind,
+ runtimeTypeName: "type",
+ }
+ // UintType represents a uint type.
+ UintType = &Type{
+ kind: UintKind,
+ runtimeTypeName: "uint",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.ModderType |
+ traits.MultiplierType |
+ traits.SubtractorType,
+ }
+ // UnknownType represents an unknown value type.
+ UnknownType = &Type{
+ kind: UnknownKind,
+ runtimeTypeName: "unknown",
+ }
+)
+
+var _ ref.Type = &Type{}
+var _ ref.Val = &Type{}
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type struct {
+ // kind indicates general category of the type.
+ kind Kind
+
+ // parameters holds the optional type-checked set of type Parameters that are used during static analysis.
+ parameters []*Type
+
+ // runtimeTypeName indicates the runtime type name of the type.
+ runtimeTypeName string
+
+ // isAssignableType function determines whether one type is assignable to this type.
+ // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters.
+ isAssignableType func(other *Type) bool
+
+ // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type.
+ // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name.
+ isAssignableRuntimeType func(other ref.Val) bool
+
+ // traitMask is a mask of flags which indicate the capabilities of the type.
+ traitMask int
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion not supported for 'type'")
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t *Type) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case TypeType:
+ return TypeType
+ case StringType:
+ return String(t.TypeName())
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal)
+}
+
+// Equal indicates whether two types have the same runtime type name.
+//
+// The name Equal is a bit of a misnomer, but for historical reasons, this is the
+// runtime behavior. For a more accurate definition see IsType().
+func (t *Type) Equal(other ref.Val) ref.Val {
+ otherType, ok := other.(ref.Type)
+ return Bool(ok && t.TypeName() == otherType.TypeName())
+}
+
+// HasTrait implements the ref.Type interface method.
+func (t *Type) HasTrait(trait int) bool {
+ return trait&t.traitMask == trait
+}
+
+// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names.
+func (t *Type) IsExactType(other *Type) bool {
+ return t.isTypeInternal(other, true)
+}
+
+// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names.
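+//
+// For example (a hedged sketch using constructors from this package):
+//
+//	listT := NewListType(NewTypeParamType("T"))
+//	listU := NewListType(NewTypeParamType("U"))
+//	listT.IsEquivalentType(listU) // true
+//	listT.IsExactType(listU)      // false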
+func (t *Type) IsEquivalentType(other *Type) bool {
+ return t.isTypeInternal(other, false)
+}
+
+// Kind indicates general category of the type.
+func (t *Type) Kind() Kind {
+ if t == nil {
+ return UnspecifiedKind
+ }
+ return t.kind
+}
+
+// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag.
+func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool {
+ if t == nil {
+ return false
+ }
+ if t == other {
+ return true
+ }
+ if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) {
+ return false
+ }
+ if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() {
+ return false
+ }
+ for i, p := range t.Parameters() {
+ if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsAssignableType determines whether the current type is type-check assignable from the input fromType.
+func (t *Type) IsAssignableType(fromType *Type) bool {
+ if t == nil {
+ return false
+ }
+ if t.isAssignableType != nil {
+ return t.isAssignableType(fromType)
+ }
+ return t.defaultIsAssignableType(fromType)
+}
+
+// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType.
+//
+// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string)
+// will have a runtime assignable type of a map.
+func (t *Type) IsAssignableRuntimeType(val ref.Val) bool {
+ if t == nil {
+ return false
+ }
+ if t.isAssignableRuntimeType != nil {
+ return t.isAssignableRuntimeType(val)
+ }
+ return t.defaultIsAssignableRuntimeType(val)
+}
+
+// Parameters returns the list of type parameters if set.
+//
+// For ListKind, Parameters()[0] represents the list element type
+// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map
+// value type.
+func (t *Type) Parameters() []*Type {
+ if t == nil {
+ return emptyParams
+ }
+ return t.parameters
+}
+
+// DeclaredTypeName indicates the fully qualified and parameterized type-check type name.
+func (t *Type) DeclaredTypeName() string {
+ // If the type itself is neither null nor dyn, but is assignable to null, then it's a wrapper type.
+ if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) {
+ return fmt.Sprintf("wrapper(%s)", t.TypeName())
+ }
+ return t.TypeName()
+}
+
+// Type implements the ref.Val interface method.
+func (t *Type) Type() ref.Type {
+ return TypeType
+}
+
+// Value implements the ref.Val interface method.
+func (t *Type) Value() any {
+ return t.TypeName()
+}
+
+// TypeName returns the type-erased fully qualified runtime type name.
+//
+// TypeName implements the ref.Type interface method.
+func (t *Type) TypeName() string {
+ if t == nil {
+ return ""
+ }
+ return t.runtimeTypeName
+}
+
+// WithTraits creates a copy of the current Type and sets the trait mask to the traits parameter.
+//
+// This method should be used with Opaque types where the type acts like a container, e.g. vector.
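+//
+// A hedged sketch (the "vector" type name is illustrative):
+//
+//	vec := NewOpaqueType("vector", DynType).WithTraits(traits.AdderType | traits.SizerType)
+//	vec.HasTrait(traits.SizerType) // true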
+func (t *Type) WithTraits(traits int) *Type {
+ if t == nil {
+ return nil
+ }
+ return &Type{
+ kind: t.kind,
+ parameters: t.parameters,
+ runtimeTypeName: t.runtimeTypeName,
+ isAssignableType: t.isAssignableType,
+ isAssignableRuntimeType: t.isAssignableRuntimeType,
+ traitMask: traits,
+ }
+}
+
+// String returns a human-readable definition of the type name.
+func (t *Type) String() string {
+ if len(t.Parameters()) == 0 {
+ return t.DeclaredTypeName()
+ }
+ params := make([]string, len(t.Parameters()))
+ for i, p := range t.Parameters() {
+ params[i] = p.String()
+ }
+ return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", "))
+}
+
+// isDyn indicates whether the type is dynamic in any way.
+func (t *Type) isDyn() bool {
+ k := t.Kind()
+ return k == DynKind || k == AnyKind || k == TypeParamKind
+}
+
+// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another
+// where any of the following may return a true result:
+// - The from types are the same instance
+// - The target type is dynamic
+// - The fromType has the same kind and type name as the target type, and all parameters of the target type
+//   are IsAssignableType() from the parameters of the fromType.
+func (t *Type) defaultIsAssignableType(fromType *Type) bool {
+ if t == fromType || t.isDyn() {
+ return true
+ }
+ if t.Kind() != fromType.Kind() ||
+ t.TypeName() != fromType.TypeName() ||
+ len(t.Parameters()) != len(fromType.Parameters()) {
+ return false
+ }
+ for i, tp := range t.Parameters() {
+ fp := fromType.Parameters()[i]
+ if !tp.IsAssignableType(fp) {
+ return false
+ }
+ }
+ return true
+}
+
+// defaultIsAssignableRuntimeType inspects the type and, in the case of lists and maps, the key and element
+// types to determine whether a ref.Val is assignable to the declared type for a function signature.
+func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool {
+ valType := val.Type()
+ // If the current type and value type don't agree, then return false.
+ if !(t.isDyn() || t.TypeName() == valType.TypeName()) {
+ return false
+ }
+ switch t.Kind() {
+ case ListKind:
+ elemType := t.Parameters()[0]
+ l := val.(traits.Lister)
+ if l.Size() == IntZero {
+ return true
+ }
+ it := l.Iterator()
+ elemVal := it.Next()
+ return elemType.IsAssignableRuntimeType(elemVal)
+ case MapKind:
+ keyType := t.Parameters()[0]
+ elemType := t.Parameters()[1]
+ m := val.(traits.Mapper)
+ if m.Size() == IntZero {
+ return true
+ }
+ it := m.Iterator()
+ keyVal := it.Next()
+ elemVal := m.Get(keyVal)
+ return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal)
+ }
+ return true
+}
+
+// NewListType creates an instance of a list type value with the provided element type.
+func NewListType(elemType *Type) *Type {
+ return &Type{
+ kind: ListKind,
+ parameters: []*Type{elemType},
+ runtimeTypeName: "list",
+ traitMask: traits.AdderType |
+ traits.ContainerType |
+ traits.IndexerType |
+ traits.IterableType |
+ traits.SizerType,
+ }
+}
+
+// NewMapType creates an instance of a map type value with the provided key and value types.
+func NewMapType(keyType, valueType *Type) *Type {
+ return &Type{
+ kind: MapKind,
+ parameters: []*Type{keyType, valueType},
+ runtimeTypeName: "map",
+ traitMask: traits.ContainerType |
+ traits.IndexerType |
+ traits.IterableType |
+ traits.SizerType,
+ }
+}
+
+// NewNullableType creates an instance of a nullable type with the provided wrapped type.
+//
+// Note: only primitive types are supported as wrapped types.
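+//
+// For example:
+//
+//	nullableInt := NewNullableType(IntType)
+//	nullableInt.IsAssignableType(NullType) // true
+//	nullableInt.IsAssignableType(IntType)  // true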
+func NewNullableType(wrapped *Type) *Type {
+ return &Type{
+ kind: wrapped.Kind(),
+ parameters: wrapped.Parameters(),
+ runtimeTypeName: wrapped.TypeName(),
+ traitMask: wrapped.traitMask,
+ isAssignableType: func(other *Type) bool {
+ return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other)
+ },
+ isAssignableRuntimeType: func(other ref.Val) bool {
+ return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other)
+ },
+ }
+}
+
+// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+func NewOptionalType(param *Type) *Type {
+ return NewOpaqueType("optional_type", param)
+}
+
+// NewOpaqueType creates an abstract parameterized type with a given name.
+func NewOpaqueType(name string, params ...*Type) *Type {
+ return &Type{
+ kind: OpaqueKind,
+ parameters: params,
+ runtimeTypeName: name,
+ }
+}
+
+// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+//
+// An object type is assumed to support field presence testing and field indexing. The type may also
+// indicate additional traits through the optional traits vararg.
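+//
+// For example (the type name is illustrative):
+//
+//	msgType := NewObjectType("example.Msg", traits.ReceiverType)
+//	msgType.HasTrait(traits.FieldTesterType) // true, implied for all object types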
+func NewObjectType(typeName string, traits ...int) *Type {
+ // Function sanitizes object types on the fly
+ if wkt, found := checkedWellKnowns[typeName]; found {
+ return wkt
+ }
+ traitMask := 0
+ for _, trait := range traits {
+ traitMask |= trait
+ }
+ return &Type{
+ kind: StructKind,
+ parameters: emptyParams,
+ runtimeTypeName: typeName,
+ traitMask: structTypeTraitMask | traitMask,
+ }
+}
+
+// NewObjectTypeValue creates a type reference to an externally defined type.
+//
+// Deprecated: use cel.ObjectType(typeName)
+func NewObjectTypeValue(typeName string) *Type {
+ return NewObjectType(typeName)
+}
+
+// NewTypeValue creates an opaque type which has a set of optional type traits as defined in
+// the common/types/traits package.
+//
+// Deprecated: use cel.ObjectType(typeName, traits)
+func NewTypeValue(typeName string, traits ...int) *Type {
+ traitMask := 0
+ for _, trait := range traits {
+ traitMask |= trait
+ }
+ return &Type{
+ kind: StructKind,
+ parameters: emptyParams,
+ runtimeTypeName: typeName,
+ traitMask: traitMask,
+ }
+}
+
+// NewTypeParamType creates a parameterized type instance.
+func NewTypeParamType(paramName string) *Type {
+ return &Type{
+ kind: TypeParamKind,
+ runtimeTypeName: paramName,
+ }
+}
+
+// NewTypeTypeWithParam creates a type with a type parameter.
+// Used for type-checking purposes, but equivalent to TypeType otherwise.
+func NewTypeTypeWithParam(param *Type) *Type {
+ return &Type{
+ kind: TypeKind,
+ runtimeTypeName: "type",
+ parameters: []*Type{param},
+ }
+}
+
+// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
+func TypeToExprType(t *Type) (*exprpb.Type, error) {
+ switch t.Kind() {
+ case AnyKind:
+ return chkdecls.Any, nil
+ case BoolKind:
+ return maybeWrapper(t, chkdecls.Bool), nil
+ case BytesKind:
+ return maybeWrapper(t, chkdecls.Bytes), nil
+ case DoubleKind:
+ return maybeWrapper(t, chkdecls.Double), nil
+ case DurationKind:
+ return chkdecls.Duration, nil
+ case DynKind:
+ return chkdecls.Dyn, nil
+ case ErrorKind:
+ return chkdecls.Error, nil
+ case IntKind:
+ return maybeWrapper(t, chkdecls.Int), nil
+ case ListKind:
+ if len(t.Parameters()) != 1 {
+ return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters()))
+ }
+ et, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewListType(et), nil
+ case MapKind:
+ if len(t.Parameters()) != 2 {
+ return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters()))
+ }
+ kt, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ vt, err := TypeToExprType(t.Parameters()[1])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewMapType(kt, vt), nil
+ case NullTypeKind:
+ return chkdecls.Null, nil
+ case OpaqueKind:
+ params := make([]*exprpb.Type, len(t.Parameters()))
+ for i, p := range t.Parameters() {
+ pt, err := TypeToExprType(p)
+ if err != nil {
+ return nil, err
+ }
+ params[i] = pt
+ }
+ return chkdecls.NewAbstractType(t.TypeName(), params...), nil
+ case StringKind:
+ return maybeWrapper(t, chkdecls.String), nil
+ case StructKind:
+ return chkdecls.NewObjectType(t.TypeName()), nil
+ case TimestampKind:
+ return chkdecls.Timestamp, nil
+ case TypeParamKind:
+ return chkdecls.NewTypeParamType(t.TypeName()), nil
+ case TypeKind:
+ if len(t.Parameters()) == 1 {
+ p, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewTypeType(p), nil
+ }
+ return chkdecls.NewTypeType(nil), nil
+ case UintKind:
+ return maybeWrapper(t, chkdecls.Uint), nil
+ }
+ return nil, fmt.Errorf("missing type conversion to proto: %v", t)
+}
+
+// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
+func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+ return AlphaProtoAsType(t)
+}
+
+// AlphaProtoAsType converts a CEL v1alpha1.Type protobuf type to a CEL-native type representation.
+func AlphaProtoAsType(t *exprpb.Type) (*Type, error) {
+ canonical := &celpb.Type{}
+ if err := convertProto(t, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsType(canonical)
+}
+
+// ProtoAsType converts a canonical CEL celpb.Type protobuf type to a CEL-native type representation.
+func ProtoAsType(t *celpb.Type) (*Type, error) {
+ switch t.GetTypeKind().(type) {
+ case *celpb.Type_Dyn:
+ return DynType, nil
+ case *celpb.Type_AbstractType_:
+ paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes()))
+ for i, p := range t.GetAbstractType().GetParameterTypes() {
+ pt, err := ProtoAsType(p)
+ if err != nil {
+ return nil, err
+ }
+ paramTypes[i] = pt
+ }
+ return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil
+ case *celpb.Type_ListType_:
+ et, err := ProtoAsType(t.GetListType().GetElemType())
+ if err != nil {
+ return nil, err
+ }
+ return NewListType(et), nil
+ case *celpb.Type_MapType_:
+ kt, err := ProtoAsType(t.GetMapType().GetKeyType())
+ if err != nil {
+ return nil, err
+ }
+ vt, err := ProtoAsType(t.GetMapType().GetValueType())
+ if err != nil {
+ return nil, err
+ }
+ return NewMapType(kt, vt), nil
+ case *celpb.Type_MessageType:
+ return NewObjectType(t.GetMessageType()), nil
+ case *celpb.Type_Null:
+ return NullType, nil
+ case *celpb.Type_Primitive:
+ switch t.GetPrimitive() {
+ case celpb.Type_BOOL:
+ return BoolType, nil
+ case celpb.Type_BYTES:
+ return BytesType, nil
+ case celpb.Type_DOUBLE:
+ return DoubleType, nil
+ case celpb.Type_INT64:
+ return IntType, nil
+ case celpb.Type_STRING:
+ return StringType, nil
+ case celpb.Type_UINT64:
+ return UintType, nil
+ default:
+ return nil, fmt.Errorf("unsupported primitive type: %v", t)
+ }
+ case *celpb.Type_TypeParam:
+ return NewTypeParamType(t.GetTypeParam()), nil
+ case *celpb.Type_Type:
+ if t.GetType().GetTypeKind() != nil {
+ p, err := ProtoAsType(t.GetType())
+ if err != nil {
+ return nil, err
+ }
+ return NewTypeTypeWithParam(p), nil
+ }
+ return TypeType, nil
+ case *celpb.Type_WellKnown:
+ switch t.GetWellKnown() {
+ case celpb.Type_ANY:
+ return AnyType, nil
+ case celpb.Type_DURATION:
+ return DurationType, nil
+ case celpb.Type_TIMESTAMP:
+ return TimestampType, nil
+ default:
+ return nil, fmt.Errorf("unsupported well-known type: %v", t)
+ }
+ case *celpb.Type_Wrapper:
+ t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}})
+ if err != nil {
+ return nil, err
+ }
+ return NewNullableType(t), nil
+ case *celpb.Type_Error:
+ return ErrorType, nil
+ default:
+ return nil, fmt.Errorf("unsupported type: %v", t)
+ }
+}
+
+func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type {
+ if t.IsAssignableType(NullType) {
+ return chkdecls.NewWrapperType(pbType)
+ }
+ return pbType
+}
+
+func maybeForeignType(t ref.Type) *Type {
+ if celType, ok := t.(*Type); ok {
+ return celType
+ }
+ // Inspect the incoming type to determine its traits. The assumption will be that the incoming
+ // type does not have any field values; however, if the trait mask indicates that field testing
+ // and indexing are supported, the foreign type is marked as a struct.
+ traitMask := 0
+ for _, trait := range allTraits {
+ if t.HasTrait(trait) {
+ traitMask |= trait
+ }
+ }
+ // Treat the value like a struct. If it has no fields, this is harmless to denote the type
+ // as such since it basically becomes an opaque type by convention.
+ return NewObjectType(t.TypeName(), traitMask)
+}
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
+
+func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type {
+ return &celpb.Type{
+ TypeKind: &celpb.Type_Primitive{
+ Primitive: primitive,
+ },
+ }
+}
+
+var (
+ checkedWellKnowns = map[string]*Type{
+ // Wrapper types.
+ "google.protobuf.BoolValue": NewNullableType(BoolType),
+ "google.protobuf.BytesValue": NewNullableType(BytesType),
+ "google.protobuf.DoubleValue": NewNullableType(DoubleType),
+ "google.protobuf.FloatValue": NewNullableType(DoubleType),
+ "google.protobuf.Int64Value": NewNullableType(IntType),
+ "google.protobuf.Int32Value": NewNullableType(IntType),
+ "google.protobuf.UInt64Value": NewNullableType(UintType),
+ "google.protobuf.UInt32Value": NewNullableType(UintType),
+ "google.protobuf.StringValue": NewNullableType(StringType),
+ // Well-known types.
+ "google.protobuf.Any": AnyType,
+ "google.protobuf.Duration": DurationType,
+ "google.protobuf.Timestamp": TimestampType,
+ // Json types.
+ "google.protobuf.ListValue": NewListType(DynType),
+ "google.protobuf.NullValue": NullType,
+ "google.protobuf.Struct": NewMapType(StringType, DynType),
+ "google.protobuf.Value": DynType,
+ }
+
+ emptyParams = []*Type{}
+
+ allTraits = []int{
+ traits.AdderType,
+ traits.ComparerType,
+ traits.ContainerType,
+ traits.DividerType,
+ traits.FieldTesterType,
+ traits.IndexerType,
+ traits.IterableType,
+ traits.IteratorType,
+ traits.MatcherType,
+ traits.ModderType,
+ traits.MultiplierType,
+ traits.NegatorType,
+ traits.ReceiverType,
+ traits.SizerType,
+ traits.SubtractorType,
+ }
+
+ structTypeTraitMask = traits.FieldTesterType | traits.IndexerType
+
+ boolType = primitiveType(celpb.Type_BOOL)
+ bytesType = primitiveType(celpb.Type_BYTES)
+ doubleType = primitiveType(celpb.Type_DOUBLE)
+ intType = primitiveType(celpb.Type_INT64)
+ stringType = primitiveType(celpb.Type_STRING)
+ uintType = primitiveType(celpb.Type_UINT64)
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/uint.go b/hack/tools/vendor/github.com/google/cel-go/common/types/uint.go
new file mode 100644
index 0000000000..6d74f30d88
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/uint.go
@@ -0,0 +1,256 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Uint type implementation which supports comparison and math operators.
+type Uint uint64
+
+var (
+ uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{})
+
+ uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{})
+)
+
+// Uint constants
+const (
+ uintZero = Uint(0)
+)
+
+// Add implements traits.Adder.Add.
+func (i Uint) Add(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := addUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (i Uint) Compare(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareUintDouble(i, ov)
+ case Int:
+ return compareUintInt(i, ov)
+ case Uint:
+ return compareUint(i, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Uint, reflect.Uint32:
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint8:
+ v, err := uint64ToUint8Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint16:
+ v, err := uint64ToUint16Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint64:
+ return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.UInt64(uint64(i)))
+ case jsonValueType:
+ // JSON can accurately represent 32-bit uints as floating point values.
+ if i.isJSONSafe() {
+ return structpb.NewNumberValue(float64(i)), nil
+ }
+ // Proto3 to JSON conversion requires string-formatted uint64 values
+ // since the conversion to floating point would result in truncation.
+ return structpb.NewStringValue(strconv.FormatUint(uint64(i), 10)), nil
+ case uint32WrapperType:
+ // Convert the value to a wrapperspb.UInt32Value, error on overflow.
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return wrapperspb.UInt32(v), nil
+ case uint64WrapperType:
+ // Convert the value to a wrapperspb.UInt64Value.
+ return wrapperspb.UInt64(uint64(i)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Uint32:
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Uint64:
+ v := uint64(i)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ iv := i.Value()
+ if reflect.TypeOf(iv).Implements(typeDesc) {
+ return iv, nil
+ }
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Uint) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ v, err := uint64ToInt64Checked(uint64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(v)
+ case UintType:
+ return i
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", uint64(i)))
+ case TypeType:
+ return UintType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Uint) Divide(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ div, err := divideUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(div)
+}
+
+// Equal implements ref.Val.Equal.
+func (i Uint) Equal(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(compareUintDouble(i, ov) == 0)
+ case Int:
+ return Bool(compareUintInt(i, ov) == 0)
+ case Uint:
+ return Bool(i == ov)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the uint is zero.
+func (i Uint) IsZeroValue() bool {
+ return i == 0
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Uint) Modulo(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ mod, err := moduloUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(mod)
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Uint) Multiply(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := multiplyUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Uint) Subtract(subtrahend ref.Val) ref.Val {
+ subtraUint, ok := subtrahend.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractUint64Checked(uint64(i), uint64(subtraUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Type implements ref.Val.Type.
+func (i Uint) Type() ref.Type {
+ return UintType
+}
+
+// Value implements ref.Val.Value.
+func (i Uint) Value() any {
+ return uint64(i)
+}
+
+// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON.
+func (i Uint) isJSONSafe() bool {
+ return i <= maxIntJSON
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/unknown.go b/hack/tools/vendor/github.com/google/cel-go/common/types/unknown.go
new file mode 100644
index 0000000000..9dd2b25794
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/unknown.go
@@ -0,0 +1,326 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}}
+)
+
+// NewAttributeTrail creates a new simple attribute from a variable name.
+func NewAttributeTrail(variable string) *AttributeTrail {
+ if variable == "" {
+ return unspecifiedAttribute
+ }
+ return &AttributeTrail{variable: variable}
+}
+
+// AttributeTrail specifies a variable with an optional qualifier path. An attribute value is expected to
+// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable.
+//
+// The qualifier path elements adhere to the AttributeQualifier type constraint.
+type AttributeTrail struct {
+ variable string
+ qualifierPath []any
+}
+
+// Equal returns whether two attribute values have the same variable name and qualifier paths.
+func (a *AttributeTrail) Equal(other *AttributeTrail) bool {
+ if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) {
+ return false
+ }
+ for i, q := range a.QualifierPath() {
+ qual := other.QualifierPath()[i]
+ if !qualifiersEqual(q, qual) {
+ return false
+ }
+ }
+ return true
+}
+
+func qualifiersEqual(a, b any) bool {
+ if a == b {
+ return true
+ }
+ switch numA := a.(type) {
+ case int64:
+ numB, ok := b.(uint64)
+ if !ok {
+ return false
+ }
+ return intUintEqual(numA, numB)
+ case uint64:
+ numB, ok := b.(int64)
+ if !ok {
+ return false
+ }
+ return intUintEqual(numB, numA)
+ default:
+ return false
+ }
+}
+
+func intUintEqual(i int64, u uint64) bool {
+ if i < 0 || u > math.MaxInt64 {
+ return false
+ }
+ return i == int64(u)
+}
+
+// Variable returns the variable name associated with the attribute.
+func (a *AttributeTrail) Variable() string {
+ return a.variable
+}
+
+// QualifierPath returns the optional set of qualifying fields or indices applied to the variable.
+func (a *AttributeTrail) QualifierPath() []any {
+ return a.qualifierPath
+}
+
+// String returns the string representation of the Attribute.
+func (a *AttributeTrail) String() string {
+ if a.variable == "" {
+ return ""
+ }
+ var str strings.Builder
+ str.WriteString(a.variable)
+ for _, q := range a.qualifierPath {
+ switch q := q.(type) {
+ case bool, int64:
+ str.WriteString(fmt.Sprintf("[%v]", q))
+ case uint64:
+ str.WriteString(fmt.Sprintf("[%vu]", q))
+ case string:
+ if isIdentifierCharacter(q) {
+ str.WriteString(fmt.Sprintf(".%v", q))
+ } else {
+ str.WriteString(fmt.Sprintf("[%q]", q))
+ }
+ }
+ }
+ return str.String()
+}
+
+func isIdentifierCharacter(str string) bool {
+ for _, c := range str {
+ if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// AttributeQualifier constrains the possible types which may be used to qualify an attribute.
+type AttributeQualifier interface {
+ bool | int64 | uint64 | string
+}
+
+// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type.
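+//
+// For example (the variable and field names are illustrative):
+//
+//	attr := NewAttributeTrail("request")
+//	attr = QualifyAttribute[string](attr, "auth")
+//	attr.String() // "request.auth"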
+func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail {
+ attr.qualifierPath = append(attr.qualifierPath, qualifier)
+ return attr
+}
+
+// Unknown is a type which collects expression ids that caused the current value to become unknown.
+type Unknown struct {
+ attributeTrails map[int64][]*AttributeTrail
+}
+
+// NewUnknown creates a new unknown at a given expression id for an attribute.
+//
+// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`.
+func NewUnknown(id int64, attr *AttributeTrail) *Unknown {
+ if attr == nil {
+ attr = unspecifiedAttribute
+ }
+ return &Unknown{
+ attributeTrails: map[int64][]*AttributeTrail{id: {attr}},
+ }
+}
+
+// IDs returns the set of unknown expression ids contained by this value.
+//
+// Numeric identifiers are guaranteed to be in sorted order.
+func (u *Unknown) IDs() []int64 {
+ ids := make(int64Slice, len(u.attributeTrails))
+ i := 0
+ for id := range u.attributeTrails {
+ ids[i] = id
+ i++
+ }
+ ids.Sort()
+ return ids
+}
+
+// GetAttributeTrails returns the missing attribute trails, if present, for a given expression id.
+func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) {
+ trails, found := u.attributeTrails[id]
+ return trails, found
+}
+
+// Contains returns true if the input unknown is a subset of the current unknown.
+func (u *Unknown) Contains(other *Unknown) bool {
+ for id, otherTrails := range other.attributeTrails {
+ trails, found := u.attributeTrails[id]
+ if !found || len(otherTrails) != len(trails) {
+ return false
+ }
+ for _, ot := range otherTrails {
+ found := false
+ for _, t := range trails {
+ if t.Equal(ot) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return u.Value(), nil
+}
+
+// ConvertToType is an identity function since unknown values cannot be modified.
+func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val {
+ return u
+}
+
+// Equal is an identity function since unknown values cannot be modified.
+func (u *Unknown) Equal(other ref.Val) ref.Val {
+ return u
+}
+
+// String implements the Stringer interface.
+func (u *Unknown) String() string {
+ var str strings.Builder
+ for id, attrs := range u.attributeTrails {
+ if str.Len() != 0 {
+ str.WriteString(", ")
+ }
+ if len(attrs) == 1 {
+ str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id))
+ } else {
+ str.WriteString(fmt.Sprintf("%v (%d)", attrs, id))
+ }
+ }
+ return str.String()
+}
+
+// Type implements ref.Val.Type.
+func (u *Unknown) Type() ref.Type {
+ return UnknownType
+}
+
+// Value implements ref.Val.Value.
+func (u *Unknown) Value() any {
+ return u
+}
+
+// IsUnknown returns whether the element ref.Val is an instance of *types.Unknown.
+func IsUnknown(val ref.Val) bool {
+ switch val.(type) {
+ case *Unknown:
+ return true
+ default:
+ return false
+ }
+}
+
+// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce
+// an unknown result.
+//
+// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input
+// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil.
+// If both values are non-nil and unknown, then the return value will be a merge of both unknowns.
+func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) {
+ src, isUnk := val.(*Unknown)
+ if !isUnk {
+ if unk != nil {
+ return unk, true
+ }
+ return unk, false
+ }
+ return MergeUnknowns(src, unk), true
+}
+
+// MergeUnknowns combines two unknown values into a new unknown value.
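+//
+// For example (a hedged sketch):
+//
+//	u1 := NewUnknown(1, NewAttributeTrail("x"))
+//	u2 := NewUnknown(2, NewAttributeTrail("y"))
+//	MergeUnknowns(u1, u2).IDs() // [1, 2]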
+func MergeUnknowns(unk1, unk2 *Unknown) *Unknown {
+ if unk1 == nil {
+ return unk2
+ }
+ if unk2 == nil {
+ return unk1
+ }
+ out := &Unknown{
+ attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)),
+ }
+ for id, ats := range unk1.attributeTrails {
+ out.attributeTrails[id] = ats
+ }
+ for id, ats := range unk2.attributeTrails {
+ existing, found := out.attributeTrails[id]
+ if !found {
+ out.attributeTrails[id] = ats
+ continue
+ }
+
+ for _, at := range ats {
+ found := false
+ for _, et := range existing {
+ if at.Equal(et) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ existing = append(existing, at)
+ }
+ }
+ out.attributeTrails[id] = existing
+ }
+ return out
+}
+
+// int64Slice is an implementation of sort.Interface.
+type int64Slice []int64
+
+// Len returns the number of elements in the slice.
+func (x int64Slice) Len() int { return len(x) }
+
+// Less indicates whether the value at index i is less than the value at index j.
+func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] }
+
+// Swap swaps the values at indices i and j in place.
+func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// Sort is a convenience method: x.Sort() calls Sort(x).
+func (x int64Slice) Sort() { sort.Sort(x) }
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/util.go b/hack/tools/vendor/github.com/google/cel-go/common/types/util.go
new file mode 100644
index 0000000000..71662eee31
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/util.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType.
+func IsUnknownOrError(val ref.Val) bool {
+ switch val.(type) {
+ case *Unknown, *Err:
+ return true
+ }
+ return false
+}
+
+// IsPrimitiveType returns whether the input element ref.Val is a primitive type.
+// Note, primitive types do not include well-known types such as Duration and Timestamp.
+func IsPrimitiveType(val ref.Val) bool {
+ switch val.Type() {
+ case BoolType, BytesType, DoubleType, IntType, StringType, UintType:
+ return true
+ }
+ return false
+}
+
+// Equal returns whether the two ref.Val values are heterogeneously equivalent.
+func Equal(lhs ref.Val, rhs ref.Val) ref.Val {
+ lNull := lhs == NullValue
+ rNull := rhs == NullValue
+ if lNull || rNull {
+ return Bool(lNull == rNull)
+ }
+ return lhs.Equal(rhs)
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/ext/BUILD.bazel
new file mode 100644
index 0000000000..1fece70066
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/BUILD.bazel
@@ -0,0 +1,72 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "bindings.go",
+ "encoders.go",
+ "formatting.go",
+ "guards.go",
+ "lists.go",
+ "math.go",
+ "native.go",
+ "protos.go",
+ "sets.go",
+ "strings.go",
+ ],
+ importpath = "github.com/google/cel-go/ext",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//cel:go_default_library",
+ "//checker:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//interpreter:go_default_library",
+ "//parser:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_x_text//language:go_default_library",
+ "@org_golang_x_text//message:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "encoders_test.go",
+ "lists_test.go",
+ "math_test.go",
+ "native_test.go",
+ "protos_test.go",
+ "sets_test.go",
+ "strings_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//cel:go_default_library",
+ "//checker:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+ ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/README.md b/hack/tools/vendor/github.com/google/cel-go/ext/README.md
new file mode 100644
index 0000000000..07e544d0dc
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/README.md
@@ -0,0 +1,899 @@
+# Extensions
+
+CEL extensions are a related set of constants, functions, macros, or other
+features which may not be covered by the core CEL spec.
+
+## Bindings
+
+Returns a cel.EnvOption to configure support for local variable bindings
+in expressions.
+
+### Cel.Bind
+
+Binds a simple identifier to an initialization expression which may be used
+in a subsequent result expression. Bindings may also be nested within each
+other.
+
+ cel.bind(<varName>, <initExpr>, <resultExpr>)
+
+Examples:
+
+ cel.bind(a, 'hello',
+ cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+
+ // Avoid a list allocation within the exists comprehension.
+ cel.bind(valid_values, [a, b, c],
+ [d, e, f].exists(elem, elem in valid_values))
+
+Local bindings are not guaranteed to be evaluated before use.
+
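+A minimal Go sketch of enabling and evaluating the extension (a hedged
+example: error handling is elided and the expression is illustrative):
+
+    package main
+
+    import (
+        "fmt"
+
+        "github.com/google/cel-go/cel"
+        "github.com/google/cel-go/ext"
+    )
+
+    func main() {
+        // Enable the bindings extension when constructing the environment.
+        env, _ := cel.NewEnv(ext.Bindings())
+        ast, iss := env.Compile(`cel.bind(a, 'hello', a + ' ' + a)`)
+        if iss.Err() != nil {
+            panic(iss.Err())
+        }
+        prg, _ := env.Program(ast)
+        out, _, _ := prg.Eval(cel.NoVars())
+        fmt.Println(out.Value()) // hello hello
+    }
+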
+## Encoders
+
+Encoding utilities for marshalling data into standardized representations.
+
+### Base64.Decode
+
+Decodes base64-encoded string to bytes.
+
+This function will return an error if the string input is not
+base64-encoded.
+
+ base64.decode(<string>) -> <bytes>
+
+Examples:
+
+ base64.decode('aGVsbG8=') // return b'hello'
+ base64.decode('aGVsbG8') // error
+
+### Base64.Encode
+
+Encodes bytes to a base64-encoded string.
+
+ base64.encode(<bytes>) -> <string>
+
+Example:
+
+ base64.encode(b'hello') // return 'aGVsbG8='
+
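+As a hedged Go sketch, the extension is enabled the same way as in the
+Bindings example above, swapping in `ext.Encoders()`:
+
+    env, _ := cel.NewEnv(ext.Encoders())
+    ast, _ := env.Compile(`base64.encode(b'hello')`)
+    prg, _ := env.Program(ast)
+    out, _, _ := prg.Eval(cel.NoVars())
+    // out.Value() == "aGVsbG8="
+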
+## Math
+
+Math helper macros and functions.
+
+Note, all macros use the 'math' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'math', the macro will likely work just as
+intended; however, there is some chance for collision.
+
+### Math.Greatest
+
+Returns the greatest valued number present in the arguments to the macro.
+
+Greatest is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+ math.greatest(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+ math.greatest(1) // 1
+ math.greatest(1u, 2u) // 2u
+ math.greatest(-42.0, -21.5, -100.0) // -21.5
+ math.greatest([-42.0, -21.5, -100.0]) // -21.5
+ math.greatest(numbers) // numbers must be list(numeric)
+
+ math.greatest() // parse error
+ math.greatest('string') // parse error
+ math.greatest(a, b) // check-time error if a or b is non-numeric
+ math.greatest(dyn('string')) // runtime error
+
+### Math.Least
+
+Returns the least valued number present in the arguments to the macro.
+
+Least is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+ math.least(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+ math.least(1) // 1
+ math.least(1u, 2u) // 1u
+ math.least(-42.0, -21.5, -100.0) // -100.0
+ math.least([-42.0, -21.5, -100.0]) // -100.0
+ math.least(numbers) // numbers must be list(numeric)
+
+ math.least() // parse error
+ math.least('string') // parse error
+ math.least(a, b) // check-time error if a or b is non-numeric
+ math.least(dyn('string')) // runtime error
+
+### Math.BitOr
+
+Introduced at version: 1
+
+Performs a bitwise-OR operation over two int or uint values.
+
+ math.bitOr(<int>, <int>) -> <int>
+ math.bitOr(<uint>, <uint>) -> <uint>
+
+Examples:
+
+ math.bitOr(1u, 2u) // returns 3u
+ math.bitOr(-2, -4) // returns -2
+
+### Math.BitAnd
+
+Introduced at version: 1
+
+Performs a bitwise-AND operation over two int or uint values.
+
+ math.bitAnd(<int>, <int>) -> <int>
+ math.bitAnd(<uint>, <uint>) -> <uint>
+
+Examples:
+
+ math.bitAnd(3u, 2u) // return 2u
+ math.bitAnd(3, 5) // returns 3
+ math.bitAnd(-3, -5) // returns -7
+
+### Math.BitXor
+
+Introduced at version: 1
+
+Performs a bitwise-XOR operation over two int or uint values.
+
+ math.bitXor(<int>, <int>) -> <int>
+ math.bitXor(<uint>, <uint>) -> <uint>
+
+Examples:
+
+ math.bitXor(3u, 5u) // returns 6u
+ math.bitXor(1, 3) // returns 2
+
+### Math.BitNot
+
+Introduced at version: 1
+
+Function which accepts a single int or uint and performs a bitwise-NOT
+ones-complement of the given binary value.
+
+ math.bitNot(<int>) -> <int>
+ math.bitNot(<uint>) -> <uint>
+
+Examples:
+
+ math.bitNot(1) // returns -1
+ math.bitNot(-1) // return 0
+ math.bitNot(0u) // returns 18446744073709551615u
+
+### Math.BitShiftLeft
+
+Introduced at version: 1
+
+Perform a left shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned
+since the number of bits shifted is greater than or equal to the total bit
+length of the number being shifted. Negative valued bit shifts will result
+in a runtime error.
+
+ math.bitShiftLeft(<int>, <int>) -> <int>
+ math.bitShiftLeft(<uint>, <int>) -> <uint>
+
+Examples:
+
+ math.bitShiftLeft(1, 2) // returns 4
+ math.bitShiftLeft(-1, 2) // returns -4
+ math.bitShiftLeft(1u, 2) // return 4u
+ math.bitShiftLeft(1u, 200) // returns 0u
+
+### Math.BitShiftRight
+
+Introduced at version: 1
+
+Perform a right shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned since
+the number of bits shifted is greater than or equal to the total bit length
+of the number being shifted. Negative valued bit shifts will result in a
+runtime error.
+
+The sign bit extension will not be preserved for this operation: vacant bits
+on the left are filled with 0.
+
+ math.bitShiftRight(<int>, <int>) -> <int>
+ math.bitShiftRight(<uint>, <int>) -> <uint>
+
+Examples:
+
+ math.bitShiftRight(1024, 2) // returns 256
+ math.bitShiftRight(1024u, 2) // returns 256u
+ math.bitShiftRight(1024u, 64) // returns 0u
+
+### Math.Ceil
+
+Introduced at version: 1
+
+Compute the ceiling of a double value.
+
+ math.ceil(<double>) -> <double>
+
+Examples:
+
+ math.ceil(1.2) // returns 2.0
+ math.ceil(-1.2) // returns -1.0
+
+### Math.Floor
+
+Introduced at version: 1
+
+Compute the floor of a double value.
+
+ math.floor(<double>) -> <double>
+
+Examples:
+
+ math.floor(1.2) // returns 1.0
+ math.floor(-1.2) // returns -2.0
+
+### Math.Round
+
+Introduced at version: 1
+
+Rounds the double value to the nearest whole number with ties rounding away
+from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0.
+
+ math.round(<double>) -> <double>
+
+Examples:
+
+ math.round(1.2) // returns 1.0
+ math.round(1.5) // returns 2.0
+ math.round(-1.5) // returns -2.0
+
+### Math.Trunc
+
+Introduced at version: 1
+
+Truncates the fractional portion of the double value.
+
+ math.trunc(<double>) -> <double>
+
+Examples:
+
+ math.trunc(-1.3) // returns -1.0
+ math.trunc(1.3) // returns 1.0
+
+### Math.Abs
+
+Introduced at version: 1
+
+Returns the absolute value of the numeric type provided as input. If the
+value is NaN, the output is NaN. If the input is int64 min, the function
+will result in an overflow error.
+
+ math.abs(<double>) -> <double>
+ math.abs(<int>) -> <int>
+ math.abs(<uint>) -> <uint>
+
+Examples:
+
+ math.abs(-1) // returns 1
+ math.abs(1) // returns 1
+ math.abs(-9223372036854775808) // overflow error
+
+### Math.Sign
+
+Introduced at version: 1
+
+Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or
+uint depending on the overload. For floating point values, if NaN is
+provided as input, the output is also NaN. The implementation does not
+differentiate between positive and negative zero.
+
+ math.sign(<double>) -> <double>
+ math.sign(<int>) -> <int>
+ math.sign(<uint>) -> <uint>
+
+Examples:
+
+ math.sign(-42) // returns -1
+ math.sign(0) // returns 0
+ math.sign(42) // returns 1
+
+### Math.IsInf
+
+Introduced at version: 1
+
+Returns true if the input double value is -Inf or +Inf.
+
+ math.isInf(<double>) -> <bool>
+
+Examples:
+
+ math.isInf(1.0/0.0) // returns true
+ math.isInf(1.2) // returns false
+
+### Math.IsNaN
+
+Introduced at version: 1
+
+Returns true if the input double value is NaN, false otherwise.
+
+ math.isNaN(<double>) -> <bool>
+
+Examples:
+
+ math.isNaN(0.0/0.0) // returns true
+ math.isNaN(1.2) // returns false
+
+### Math.IsFinite
+
+Introduced at version: 1
+
+Returns true if the value is a finite number. Equivalent in behavior to:
+!math.isNaN(double) && !math.isInf(double)
+
+ math.isFinite(<double>) -> <bool>
+
+Examples:
+
+ math.isFinite(0.0/0.0) // returns false
+ math.isFinite(1.2) // returns true
+
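+As a hedged Go sketch (same setup as the earlier examples, swapping in
+`ext.Math()`):
+
+    env, _ := cel.NewEnv(ext.Math())
+    ast, _ := env.Compile(`math.greatest(-42.0, -21.5, -100.0)`)
+    prg, _ := env.Program(ast)
+    out, _, _ := prg.Eval(cel.NoVars())
+    // out.Value() == -21.5
+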
+## Protos
+
+Protos configure extended macros and functions for proto manipulation.
+
+Note, all macros use the 'proto' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'proto', the macro will likely work just as
+you intend; however, there is some chance for collision.
+
+### Protos.GetExt
+
+Macro which generates a select expression that retrieves an extension field
+from the input proto2 syntax message. If the field is not set, the default
+value for the extension field is returned according to safe-traversal semantics.
+
+ proto.getExt(<msg>, <fully.qualified.extension.name>) -> <field-type>
+
+Example:
+
+ proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+
+### Protos.HasExt
+
+Macro which generates a test-only select expression that determines whether
+an extension field is set on a proto2 syntax message.
+
+ proto.hasExt(<msg>, <fully.qualified.extension.name>) -> <bool>
+
+Example:
+
+ proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
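+
+The two macros compose naturally when an unset extension should fall back to
+an explicit value rather than the field default; a small sketch reusing the
+same extension name as above:
+
+ proto.hasExt(msg, google.expr.proto2.test.int32_ext)
+ ? proto.getExt(msg, google.expr.proto2.test.int32_ext)
+ : 42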
+
+## Lists
+
+Extended functions for list manipulation. As a general note, all indices are
+zero-based.
+
+### Distinct
+
+**Introduced in version 2**
+
+Returns the distinct elements of a list.
+
+ <list(T)>.distinct() -> <list(T)>
+
+Examples:
+
+ [1, 2, 2, 3, 3, 3].distinct() // returns [1, 2, 3]
+ ["b", "b", "c", "a", "c"].distinct() // returns ["b", "c", "a"]
+ [1, "b", 2, "b"].distinct() // returns [1, "b", 2]
+
+### Flatten
+
+**Introduced in version 1**
+
+Flattens a list one level deep by default. If an optional depth is provided,
+the list is flattened to the specified level. A negative depth value will
+result in an error.
+
+ <list>.flatten() -> <list>
+ <list>.flatten(<int>) -> <list>
+
+Examples:
+
+ [1,[2,3],[4]].flatten() // returns [1, 2, 3, 4]
+ [1,[2,[3,4]]].flatten() // returns [1, 2, [3, 4]]
+ [1,2,[],[],[3,4]].flatten() // returns [1, 2, 3, 4]
+ [1,[2,[3,[4]]]].flatten(2) // returns [1, 2, 3, [4]]
+ [1,[2,[3,[4]]]].flatten(-1) // error
+
+### Range
+
+**Introduced in version 2**
+
+Returns a list of integers from 0 to n-1.
+
+ lists.range(<int>) -> <list(int)>
+
+Examples:
+
+ lists.range(5) // returns [0, 1, 2, 3, 4]
+
+### Reverse
+
+**Introduced in version 2**
+
+Returns the elements of a list in reverse order.
+
+ <list(T)>.reverse() -> <list(T)>
+
+Examples:
+
+ [5, 3, 1, 2].reverse() // returns [2, 1, 3, 5]
+
+
+### Slice
+
+Returns a new sub-list using the indexes provided.
+
+ <list(T)>.slice(<int>, <int>) -> <list(T)>
+
+Examples:
+
+ [1,2,3,4].slice(1, 3) // returns [2, 3]
+ [1,2,3,4].slice(2, 4) // returns [3, 4]
+
+### Sort
+
+**Introduced in version 2**
+
+Sorts a list with comparable elements. If the element type is not comparable
+or the element types are not the same, the function will produce an error.
+
+ <list(T)>.sort() -> <list(T)>
+ T in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+ [3, 2, 1].sort() // returns [1, 2, 3]
+ ["b", "c", "a"].sort() // returns ["a", "b", "c"]
+ [1, "b"].sort() // error
+ [[1, 2, 3]].sort() // error
+
+### SortBy
+
+**Introduced in version 2**
+
+Sorts a list by a key value, i.e., the order is determined by the result of
+an expression applied to each element of the list.
+
+ <list(T)>.sortBy(<bindingName>, <keyExpr>) -> <list(T)>
+ keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+ [
+ Player { name: "foo", score: 0 },
+ Player { name: "bar", score: -10 },
+ Player { name: "baz", score: 1000 },
+ ].sortBy(e, e.score).map(e, e.name)
+ == ["bar", "foo", "baz"]
+
+## Sets
+
+Sets provides set relationship tests.
+
+There is no set type within CEL, and while one may be introduced in the
+future, there are cases where a `list` type is known to behave like a set.
+For such cases, this library provides some basic functionality for
+determining set containment, equivalence, and intersection.
+
+### Sets.Contains
+
+Returns whether the first list argument contains all elements in the second
+list argument. The lists may contain elements of any type, and standard CEL
+equality is used to determine whether a value exists in both lists. If the
+second list is empty, the result will always be true.
+
+ sets.contains(list(T), list(T)) -> bool
+
+Examples:
+
+ sets.contains([], []) // true
+ sets.contains([], [1]) // false
+ sets.contains([1, 2, 3, 4], [2, 3]) // true
+ sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true
+
+### Sets.Equivalent
+
+Returns whether the first and second list are set equivalent. Lists are set
+equivalent if for every item in the first list, there is an element in the
+second which is equal. The lists need not be the same size, since neither
+list is guaranteed to contain unique elements, and size therefore does not
+factor into the computation.
+
+ sets.equivalent(list(T), list(T)) -> bool
+
+Examples:
+
+ sets.equivalent([], []) // true
+ sets.equivalent([1], [1, 1]) // true
+ sets.equivalent([1], [1u, 1.0]) // true
+ sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true
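+ sets.equivalent([1, 2], [1]) // false: 2 has no equal element in the second list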
+
+### Sets.Intersects
+
+Returns whether the first list has at least one element whose value is equal
+to an element in the second list. If either list is empty, the result will
+be false.
+
+ sets.intersects(list(T), list(T)) -> bool
+
+Examples:
+
+ sets.intersects([1], []) // false
+ sets.intersects([1], [1, 2]) // true
+ sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true
+
+## Strings
+
+Extended functions for string manipulation. As a general note, all indices are
+zero-based.
+
+### CharAt
+
+Returns the character at the given position. If the position is negative, or
+greater than the length of the string, the function will produce an error:
+
+ <string>.charAt(<int>) -> <string>
+
+Examples:
+
+ 'hello'.charAt(4) // returns 'o'
+ 'hello'.charAt(5) // returns ''
+ 'hello'.charAt(-1) // error
+
+### IndexOf
+
+Returns the integer index of the first occurrence of the search string. If the
+search string is not found the function returns -1.
+
+The function also accepts an optional position from which to begin the
+substring search. If the substring is the empty string, the index where the
+search starts is returned (zero or custom).
+
+ <string>.indexOf(<string>) -> <int>
+ <string>.indexOf(<string>, <int>) -> <int>
+
+Examples:
+
+ 'hello mellow'.indexOf('') // returns 0
+ 'hello mellow'.indexOf('ello') // returns 1
+ 'hello mellow'.indexOf('jello') // returns -1
+ 'hello mellow'.indexOf('', 2) // returns 2
+ 'hello mellow'.indexOf('ello', 2) // returns 7
+ 'hello mellow'.indexOf('ello', 20) // returns -1
+ 'hello mellow'.indexOf('ello', -1) // error
+
+### Join
+
+Returns a new string where the elements of the string list are concatenated.
+
+The function also accepts an optional separator which is placed between
+elements in the resulting string.
+
+ <list<string>>.join() -> <string>
+ <list<string>>.join(<string>) -> <string>
+
+Examples:
+
+ ['hello', 'mellow'].join() // returns 'hellomellow'
+ ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+ [].join() // returns ''
+ [].join('/') // returns ''
+
+### LastIndexOf
+
+Returns the integer index of the last occurrence of the search string. If the
+search string is not found the function returns -1.
+
+The function also accepts an optional position which represents the last index
+to be considered as the beginning of the substring match. If the substring is
+the empty string, the index where the search starts is returned (string length
+or custom).
+
+ <string>.lastIndexOf(<string>) -> <int>
+ <string>.lastIndexOf(<string>, <int>) -> <int>
+
+Examples:
+
+ 'hello mellow'.lastIndexOf('') // returns 12
+ 'hello mellow'.lastIndexOf('ello') // returns 7
+ 'hello mellow'.lastIndexOf('jello') // returns -1
+ 'hello mellow'.lastIndexOf('ello', 6) // returns 1
+ 'hello mellow'.lastIndexOf('ello', 20) // returns -1
+ 'hello mellow'.lastIndexOf('ello', -1) // error
+
+### LowerAscii
+
+Returns a new string where all ASCII characters are lower-cased.
+
+This function does not perform Unicode case-mapping for characters outside the
+ASCII range.
+
+ <string>.lowerAscii() -> <string>
+
+Examples:
+
+ 'TacoCat'.lowerAscii() // returns 'tacocat'
+ 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+
+### Quote
+
+**Introduced in version 1**
+
+Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+
+ strings.quote(<string>) -> <string>
+
+Examples:
+
+ strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+ strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
+
+### Replace
+
+Returns a new string based on the target, which replaces the occurrences of a
+search string with a replacement string if present. The function accepts an
+optional limit on the number of substring replacements to be made.
+
+When the replacement limit is 0, the result is the original string. When the
+limit is a negative number, the function behaves the same as replace all.
+
+ <string>.replace(<string>, <string>) -> <string>
+ <string>.replace(<string>, <string>, <int>) -> <string>
+
+Examples:
+
+ 'hello hello'.replace('he', 'we') // returns 'wello wello'
+ 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
+ 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
+ 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
+
+### Split
+
+Returns a list of strings split from the input by the given separator. The
+function accepts an optional argument specifying a limit on the number of
+substrings produced by the split.
+
+When the split limit is 0, the result is an empty list. When the limit is 1,
+the result is the target string to split. When the limit is a negative
+number, the function behaves the same as split all.
+
+ <string>.split(<string>) -> <list<string>>
+ <string>.split(<string>, <int>) -> <list<string>>
+
+Examples:
+
+ 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
+ 'hello hello hello'.split(' ', 0) // returns []
+ 'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
+ 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
+ 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
+
+### Substring
+
+Returns the substring given a numeric range corresponding to character
+positions. The trailing range may be omitted to produce a substring from the
+given character position through the end of the string.
+
+Character offsets are 0-based with an inclusive start range and exclusive end
+range. It is an error to specify an end range that is lower than the start
+range, or for either the start or end index to be negative or exceed the string
+length.
+
+ <string>.substring(<int>) -> <string>
+ <string>.substring(<int>, <int>) -> <string>
+
+Examples:
+
+ 'tacocat'.substring(4) // returns 'cat'
+ 'tacocat'.substring(0, 4) // returns 'taco'
+ 'tacocat'.substring(-1) // error
+ 'tacocat'.substring(2, 1) // error
+
+### Trim
+
+Returns a new string which removes the leading and trailing whitespace in the
+target string. The trim function uses the Unicode definition of whitespace
+which does not include the zero-width spaces. See:
+https://en.wikipedia.org/wiki/Whitespace_character#Unicode
+
+ <string>.trim() -> <string>
+
+Examples:
+
+ ' \ttrim\n '.trim() // returns 'trim'
+
+### UpperAscii
+
+Returns a new string where all ASCII characters are upper-cased.
+
+This function does not perform Unicode case-mapping for characters outside the
+ASCII range.
+
+ <string>.upperAscii() -> <string>
+
+Examples:
+
+ 'TacoCat'.upperAscii() // returns 'TACOCAT'
+ 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
+
+### Reverse
+
+**Introduced in version 3**
+
+Returns a new string with the characters of the target string in reverse
+order. The implementation converts the string to a rune array so that
+multi-byte Unicode code points are reversed intact.
+
+ <string>.reverse() -> <string>
+
+Examples:
+
+ 'gums'.reverse() // returns 'smug'
+ 'John Smith'.reverse() // returns 'htimS nhoJ'
+
+## TwoVarComprehensions
+
+TwoVarComprehensions introduces support for two-variable comprehensions.
+
+The two-variable form of comprehensions looks similar to the one-variable
+counterparts. Where possible, the same macro names were used and additional
+macro signatures added. The notable distinction for two-variable comprehensions
+is the introduction of `transformList`, `transformMap`, and `transformMapEntry`
+support for list and map types rather than the more traditional `map` and
+`filter` macros.
+
+### All
+
+Comprehension which tests whether all elements in the list or map satisfy a
+given predicate. The `all` macro evaluates in a manner consistent with logical
+AND and will short-circuit when encountering a `false` value.
+
+ <list>.all(indexVar, valueVar, <predicate>) -> bool
+ <map>.all(indexVar, valueVar, <predicate>) -> bool
+
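+Two illustrative cases consistent with the semantics above (for lists, the
+index variable is the zero-based element index; for maps, it is the key):
+
+ [1, 2, 3].all(i, v, i < v) // returns true
+ {'a': 1, 'b': 2}.all(k, v, v > 0) // returns true
+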