diff --git a/go.mod b/go.mod index 7cfad9492..d5962a0b4 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/containers/prometheus-podman-exporter go 1.22.6 require ( - github.com/containers/common v0.60.4 - github.com/containers/image/v5 v5.32.2 - github.com/containers/podman/v5 v5.2.5 + github.com/containers/common v0.61.0 + github.com/containers/image/v5 v5.33.0 + github.com/containers/podman/v5 v5.3.0 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 github.com/pkg/errors v0.9.1 @@ -17,11 +17,11 @@ require ( ) require ( - dario.cat/mergo v1.0.0 // indirect + dario.cat/mergo v1.0.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/Microsoft/hcsshim v0.12.5 // indirect + github.com/Microsoft/hcsshim v0.12.9 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect @@ -29,41 +29,42 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/checkpoint-restore/checkpointctl v1.2.1 // indirect - github.com/checkpoint-restore/go-criu/v7 v7.1.0 // indirect + github.com/checkpoint-restore/checkpointctl v1.3.0 // indirect + github.com/checkpoint-restore/go-criu/v7 v7.2.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect - github.com/containerd/containerd v1.7.18 // indirect - github.com/containerd/errdefs v0.1.0 // indirect + github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect - github.com/containerd/typeurl/v2 v2.1.1 // indirect + github.com/containerd/typeurl/v2 v2.2.0 // indirect github.com/containernetworking/cni v1.2.3 // indirect github.com/containernetworking/plugins v1.5.1 // indirect - github.com/containers/buildah v1.37.5 // indirect + github.com/containers/buildah v1.38.0 // indirect github.com/containers/conmon v2.0.20+incompatible // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect - github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c // indirect + github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 // indirect github.com/containers/ocicrypt v1.2.0 // indirect github.com/containers/psgo v1.9.0 // indirect - github.com/containers/storage v1.55.1 // indirect + github.com/containers/storage v1.56.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect - github.com/cyphar/filepath-securejoin v0.3.1 // indirect + github.com/cyphar/filepath-securejoin v0.3.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/disiqueira/gotree/v3 v3.0.2 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.1.1+incompatible // indirect + github.com/docker/docker v27.3.1+incompatible // indirect 
github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect - github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 // indirect + github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fsouza/go-dockerclient v1.11.1 // indirect - github.com/go-jose/go-jose/v4 v4.0.2 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fsouza/go-dockerclient v1.12.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.23.0 // indirect @@ -77,12 +78,12 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect + github.com/godbus/dbus/v5 v5.1.1-0.20240921181615-a817f3cc4a9e // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry v0.20.0 // indirect + github.com/google/go-containerregistry v0.20.2 // indirect github.com/google/go-intervals v0.0.2 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect @@ -97,26 +98,29 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/fs v0.1.0 // indirect - github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect + github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/mattn/go-sqlite3 v1.14.24 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/buildkit v0.12.5 // indirect + github.com/moby/buildkit v0.17.1 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/capability v0.3.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -127,13 +131,14 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // 
indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/opencontainers/runc v1.1.13 // indirect + github.com/opencontainers/runc v1.2.1 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect - github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc // indirect - github.com/opencontainers/selinux v1.11.0 // indirect - github.com/openshift/imagebuilder v1.2.14 // indirect + github.com/opencontainers/runtime-tools v0.9.1-0.20241001195557-6c9570a1678f // indirect + github.com/opencontainers/selinux v1.11.1 // indirect + github.com/openshift/imagebuilder v1.2.15 // indirect github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect - github.com/pkg/sftp v1.13.6 // indirect + github.com/pkg/sftp v1.13.7 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/proglottis/gpgme v0.1.3 // indirect github.com/prometheus/client_model v0.6.1 // indirect @@ -141,41 +146,42 @@ require ( github.com/rivo/uniseg v0.4.7 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/sigstore/fulcio v1.4.5 // indirect + github.com/sigstore/fulcio v1.6.4 // indirect github.com/sigstore/rekor v1.3.6 // indirect - github.com/sigstore/sigstore v1.8.4 // indirect + github.com/sigstore/sigstore v1.8.9 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect - github.com/sylabs/sif/v2 v2.18.0 // indirect + github.com/sylabs/sif/v2 v2.19.1 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.12 // indirect - github.com/vbatts/tar-split v0.11.5 // indirect - github.com/vbauerster/mpb/v8 v8.7.5 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/vbatts/tar-split v0.11.6 // indirect + github.com/vbauerster/mpb/v8 v8.8.3 // indirect + github.com/vishvananda/netlink v1.3.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect - go.etcd.io/bbolt v1.3.10 // indirect + go.etcd.io/bbolt v1.3.11 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - 
golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/term v0.26.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.26.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/grpc v1.64.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index a2fec7315..227c0b6f3 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= @@ -12,8 +12,8 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0 github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0= -github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8= +github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= +github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= @@ -26,15 +26,15 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/checkpointctl v1.2.1 h1:aYFl2CEk95bPLDvNDgif4ZLx3pjCZMJm6td+A0X1+xs= -github.com/checkpoint-restore/checkpointctl v1.2.1/go.mod h1:8oF+AtNUFJAI13ETcbB3clnjiwvviX0QzVBhYzQ8yBA= -github.com/checkpoint-restore/go-criu/v7 v7.1.0 h1:JbQyO4o+P8ycNTMLPiiDqXg49bAcy4WljWCzYQho35A= -github.com/checkpoint-restore/go-criu/v7 v7.1.0/go.mod h1:1svAtmbtvX4BKI45OFzgoTTLG7oYFKdColv/Vcsb2A8= +github.com/checkpoint-restore/checkpointctl v1.3.0 h1:bNz5b6s+lxFdG5ZGDba3qSkBtXDDTCG2494dfAbQJ4E= +github.com/checkpoint-restore/checkpointctl v1.3.0/go.mod h1:dqZH4wDvbjnsqFGK2LdUDk21yFQ1dCAtzgRMlG44KDM= +github.com/checkpoint-restore/go-criu/v7 v7.2.0 h1:qGiWA4App1gGlEfIJ68WR9jbezV9J7yZdjzglezcqKo= +github.com/checkpoint-restore/go-criu/v7 v7.2.0/go.mod h1:u0LCWLg0w4yqqu14aXhiB4YD3a1qd8EcCEg7vda5dwo= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -48,40 +48,42 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= -github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= -github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= -github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= -github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= -github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= +github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= github.com/containernetworking/plugins v1.5.1/go.mod 
h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM= -github.com/containers/buildah v1.37.5 h1:oJ+cVbtgxB3ZHux4No9rKbWfcd7uoDpk8r7wcbm+Vbo= -github.com/containers/buildah v1.37.5/go.mod h1:kiNTdC/78ek5XfqX6xUAq5aR8HNVy+CQ4ODjUNbiPJM= -github.com/containers/common v0.60.4 h1:H5+LAMHPZEqX6vVNOQ+IguVsaFl8kbO/SZ/VPXjxhy0= -github.com/containers/common v0.60.4/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54= +github.com/containers/buildah v1.38.0 h1:FmciZMwzhdcvtWj+8IE+61+lfTG2JfgrbZ2DUnEMnTE= +github.com/containers/buildah v1.38.0/go.mod h1:tUsHC2bcgR5Q/R76qZUn7x0FRglqPFry2g5KhWfH4LI= +github.com/containers/common v0.61.0 h1:j/84PTqZIKKYy42OEJsZmjZ4g4Kq2ERuC3tqp2yWdh4= +github.com/containers/common v0.61.0/go.mod h1:NGRISq2vTFPSbhNqj6MLwyes4tWSlCnqbJg7R77B8xc= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= -github.com/containers/image/v5 v5.32.2 h1:SzNE2Y6sf9b1GJoC8qjCuMBXwQrACFp4p0RK15+4gmQ= -github.com/containers/image/v5 v5.32.2/go.mod h1:v1l73VeMugfj/QtKI+jhYbwnwFCFnNGckvbST3rQ5Hk= +github.com/containers/image/v5 v5.33.0 h1:6oPEFwTurf7pDTGw7TghqGs8K0+OvPtY/UyzU0B2DfE= +github.com/containers/image/v5 v5.33.0/go.mod h1:T7HpASmvnp2H1u4cyckMvCzLuYgpD18dSmabSw0AcHk= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c h1:gJDiBJYc8JFD46IJmr8SqGOcueGSRGnuhW6wgXiAjr0= -github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c/go.mod h1:Ufusu7xAtl0LSTry0JS6dSxbxR/XJQSEqlhLqTkCaH8= +github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 h1:57rxgU2wdI3lZMDZtao09WjCWmxBKOxI/Sj37IpCV50= +github.com/containers/luksy v0.0.0-20241007190014-e2530d691420/go.mod h1:MYzFCudLgMcXgFl7XuFjUowNDTBqL09BfEgMf7QHtO4= github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM= github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= -github.com/containers/podman/v5 v5.2.5 h1:OqipZ/e34NSJ+JafLiyD1zM5G4404hDbNelSOPAlSes= -github.com/containers/podman/v5 v5.2.5/go.mod h1:In5i6rJrJ3aYX3dvpllL4vLFi6har7hHVe6RcJYPRR8= +github.com/containers/podman/v5 v5.3.0 h1:C/eK7Ek1XV+ZX5nUQaqK5OM2+WTtDpLrybOHEbmgAuA= +github.com/containers/podman/v5 v5.3.0/go.mod h1:xUbIlPCLXYMmjs1FHVtNbYWYlDONCFxzZK8MaHMcFlI= github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g= github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A= -github.com/containers/storage v1.55.1 h1:ius7angdTqxO56hmTJnAznyEcUnYeLOV3ybwLozA/h8= -github.com/containers/storage v1.55.1/go.mod h1:28cB81IDk+y7ok60Of6u52RbCeBRucbFOeLunhER1RQ= +github.com/containers/storage v1.56.0 h1:DZ9KSkj6M2tvj/4bBoaJu3QDHRl35BwsZ4kmLJS97ZI= +github.com/containers/storage v1.56.0/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc= @@ -91,8 +93,8 @@ github.com/creack/pty v1.1.18 
h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE= -github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= +github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= +github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -101,20 +103,20 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE= -github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= +github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 h1:YcvzLmdrP/b8kLAGJ8GT7bdncgCAiWxJZIlt84D+RJg= -github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA= +github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8 h1:IMfrF5LCzP2Vhw7j4IIH3HxPsCLuZYjDqFAM/C88ulg= +github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -124,12 +126,12 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fsouza/go-dockerclient v1.11.1 h1:i5Vk9riDxW2uP9pVS5FYkpquMTFT5lsx2pt7oErRTjI= -github.com/fsouza/go-dockerclient v1.11.1/go.mod h1:UfjOOaspAq+RGh7GX1aZ0HeWWGHQWWsh+H5BgEWB3Pk= -github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= -github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsouza/go-dockerclient v1.12.0 h1:S2f2crEUbBNCFiF06kR/GvioEB8EMsb3Td/bpawD+aU= +github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp5B5wrSsnyeB5lFo= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -157,11 +159,11 @@ github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3Bum github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= +github.com/godbus/dbus/v5 v5.1.1-0.20240921181615-a817f3cc4a9e h1:znsZ+BW06LsAtZwQvY/rgWQ3o1q0mnR4SG4q8HCP+3Q= +github.com/godbus/dbus/v5 v5.1.1-0.20240921181615-a817f3cc4a9e/go.mod h1:nRJ+j259aT/CW6otoGCHPa1K/lNHLO+UGmW133FNj9s= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -178,7 +180,6 @@ github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -187,12 +188,11 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.20.0 h1:wRqHpOeVh3DnenOrPy9xDOLdnLatiGuuNRVelR2gSbg= -github.com/google/go-containerregistry v0.20.0/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= +github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= +github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -210,8 +210,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -229,10 +229,12 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= 
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -243,8 +245,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I= -github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= @@ -253,8 +255,8 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= +github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= @@ -265,18 +267,22 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0= -github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso= +github.com/moby/buildkit v0.17.1 
h1:VWj6eIdk7u6acHPn2CiA+tdq0/mQoBEk9ckweRzWmPw= +github.com/moby/buildkit v0.17.1/go.mod h1:ru8NFyDHD8HbuKaLXJIjK9nr3x6FZR+IWjtF07S+wdM= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg= +github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -306,18 +312,20 @@ github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0 h1:NwSQ/5rex github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0/go.mod h1:tBsQqk9ETVlXxzXjk2Xh/1VjxC/U3Gaq5ps/rC/cadE= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc= -github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs= -github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= -github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/openshift/imagebuilder v1.2.14 h1:l4gUw0KIsjZrX7otfS4WoKxzGBrxYldU3pF4+5W/ud8= -github.com/openshift/imagebuilder v1.2.14/go.mod h1:KkkXOyRjJlZEXWQtHNBNzVHqh4vf/0xX5cDIQ2gr+5I= +github.com/opencontainers/runtime-tools v0.9.1-0.20241001195557-6c9570a1678f h1:tGGVO3yF9p5s/mPi3kO1AdoUDK49z0dgQqV0jeT1kik= +github.com/opencontainers/runtime-tools v0.9.1-0.20241001195557-6c9570a1678f/go.mod h1:oIH6VwKkaDOO+SIYZpdwrC/0wKYqrfO6E1sG1j3UVws= +github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= +github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/imagebuilder v1.2.15 h1:MNn1OztEE/l8pSEDPYAQ71Ys6rpXA2P00UFhdY9p/yk= +github.com/openshift/imagebuilder v1.2.15/go.mod h1:cK6MLyBl1IHmIYGLY/2SLOG6p0PtEDUOC7khxsFYUXE= 
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/pkg/sftp v1.13.7 h1:uv+I3nNJvlKZIQGSr8JVQLNHFU9YhhNpvC14Y6KgmSM= +github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -337,25 +345,27 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= +github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY= +github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= -github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY= +github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= 
-github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w= -github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg= +github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= +github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -372,8 +382,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI= -github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo= +github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0= +github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= @@ -382,13 +392,12 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vbauerster/mpb/v8 v8.7.5 h1:hUF3zaNsuaBBwzEFoCvfuX3cpesQXZC0Phm/JcHZQ+c= -github.com/vbauerster/mpb/v8 v8.7.5/go.mod h1:bRCnR7K+mj5WXKsy0NWB6Or+wctYGvVwKn6huwvxKa0= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= +github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ= +github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk= +github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= +github.com/vishvananda/netlink v1.3.0/go.mod 
h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= @@ -400,46 +409,47 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= 
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -453,7 +463,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -465,14 +476,13 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -481,24 +491,31 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -508,6 +525,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -519,18 +537,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -540,8 +558,6 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= 
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
index 529c3412b..45ad0f1ae 100644
--- a/vendor/dario.cat/mergo/.gitignore
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -13,6 +13,9 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
 
+# Golang/Intellij
+.idea
+
 # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
 .glide/
 
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
index 7d0cf9f32..0b3c48889 100644
--- a/vendor/dario.cat/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
 
 ## Status
 
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.
 
 ### Important notes
 
 #### 1.0.0
 
-In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project because a dependency is pulling in Mergo (it isn't a direct dependency of your project), it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
 
 #### 0.3.9
 
@@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works
 
 If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software.
:heart_eyes:
 
-Buy Me a Coffee at ko-fi.com Donate using Liberapay Become my sponsor
 
 ### Mergo in the wild
 
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](https://github.com/Masterminds/sprig)
+* 
[moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
 
 ## Install
 
@@ -141,6 +118,41 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
 }
 ```
 
+If you need to override pointers, so that the source pointer itself is assigned to the destination's pointer (rather than the value it points to), you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"dario.cat/mergo"
+)
+
+type Foo struct {
+	A *string
+	B int64
+}
+
+func main() {
+	first := "first"
+	second := "second"
+	src := Foo{
+		A: &first,
+		B: 2,
+	}
+
+	dest := Foo{
+		A: &second,
+		B: 1,
+	}
+
+	mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+
+	fmt.Println(*dest.A) // prints "first": the source pointer was assigned without dereferencing
+}
+```
+
 Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
 
 ```go
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
index b50d5c2a4..759b4f74f 100644
--- a/vendor/dario.cat/mergo/map.go
+++ b/vendor/dario.cat/mergo/map.go
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 		}
 		fieldName := field.Name
 		fieldName = changeInitialCase(fieldName, unicode.ToLower)
-		if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+		if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
 			dstMap[fieldName] = src.Field(i).Interface()
 		}
 	}
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
index 0ef9b2138..fd47c95b2 100644
--- a/vendor/dario.cat/mergo/merge.go
+++ b/vendor/dario.cat/mergo/merge.go
@@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
 				return
 			}
-		} else {
+		} else if src.Elem().Kind() != reflect.Struct {
 			if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
 				dst.Set(src)
 			}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
index 6238e103b..a15609abd 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
@@ -29,7 +29,7 @@ const (
 )
 
 func (es EndpointState) String() string {
-	return [...]string{"Uninitialized", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es]
+	return [...]string{"Uninitialized", "Created", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es]
 }
 
 // HNSEndpoint represents a network endpoint in HNS
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
index 10ae4d670..b505731c3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
@@ -188,7 +188,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
 			return nil, winapi.RtlNtStatusToDosError(status)
 		}
 	} else {
-		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, 0, unicodeJobName.Buffer)
+		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, 
unicodeJobName.Buffer) if err != nil { return nil, err } @@ -523,12 +523,9 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error func isJobSilo(h windows.Handle) bool { // None of the information from the structure that this info class expects will be used, this is just used as // the call will fail if the job hasn't been upgraded to a silo so we can use this to tell when we open a job - // if it's a silo or not. Because none of the info matters simply define a dummy struct with the size that the call - // expects which is 16 bytes. - type isSiloObj struct { - _ [16]byte - } - var siloInfo isSiloObj + // if it's a silo or not. We still need to define the struct layout as expected by Win32, else the struct + // alignment might be different and the call will fail. + var siloInfo winapi.SILOOBJECT_BASIC_INFORMATION err := winapi.QueryInformationJobObject( h, winapi.JobObjectSiloBasicInformation, diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go index 8c41a3661..bf8186401 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go @@ -6,7 +6,7 @@ import ( "net" "os" - "github.com/containerd/errdefs" + errdefs "github.com/containerd/errdefs/pkg/errgrpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go index b0deb5c72..4c04dd3f8 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -28,7 +28,7 @@ const ( // https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights const ( JOB_OBJECT_QUERY = 0x0004 - JOB_OBJECT_ALL_ACCESS = 0x1F001F + JOB_OBJECT_ALL_ACCESS = 0x1F003F ) // IO limit flags @@ -160,6 +160,21 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { CompletionPort windows.Handle } +// typedef struct _SILOOBJECT_BASIC_INFORMATION { +// DWORD SiloId; +// DWORD SiloParentId; +// DWORD NumberOfProcesses; +// BOOLEAN IsInServerSilo; +// BYTE Reserved[3]; +// } SILOOBJECT_BASIC_INFORMATION, *PSILOOBJECT_BASIC_INFORMATION; +type SILOOBJECT_BASIC_INFORMATION struct { + SiloID uint32 + SiloParentID uint32 + NumberOfProcesses uint32 + IsInServerSilo bool + Reserved [3]uint8 +} + // BOOL IsProcessInJob( // HANDLE ProcessHandle, // HANDLE JobHandle, @@ -184,7 +199,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { // LPCWSTR lpName // ); // -//sys OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW +//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW // DWORD SetIoRateControlInformationJobObject( // HANDLE hJob, diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index 33720fe8b..ecdded312 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -470,8 +470,12 @@ func LocalFree(ptr uintptr) { return } -func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) { - r0, _, e1 := 
syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName))) +func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) handle = windows.Handle(r0) if handle == 0 { err = errnoErr(e1) diff --git a/vendor/github.com/checkpoint-restore/go-criu/v7/.gitignore b/vendor/github.com/checkpoint-restore/go-criu/v7/.gitignore index d8b4f7401..eb1b08bc4 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v7/.gitignore +++ b/vendor/github.com/checkpoint-restore/go-criu/v7/.gitignore @@ -15,3 +15,4 @@ scripts/magic-gen/expected.go scripts/magic-gen/output.go crit/bin crit/test-imgs/ +__pycache__ diff --git a/vendor/github.com/checkpoint-restore/go-criu/v7/.golangci.yml b/vendor/github.com/checkpoint-restore/go-criu/v7/.golangci.yml index 694f6adf8..a0d20be21 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v7/.golangci.yml +++ b/vendor/github.com/checkpoint-restore/go-criu/v7/.golangci.yml @@ -16,3 +16,7 @@ linters: linters-settings: exhaustive: default-signifies-exhaustive: true + gosec: + excludes: + # https://github.com/securego/gosec/issues/1185 + - G115 diff --git a/vendor/github.com/checkpoint-restore/go-criu/v7/Makefile b/vendor/github.com/checkpoint-restore/go-criu/v7/Makefile index 6aacde06e..12e064237 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v7/Makefile +++ b/vendor/github.com/checkpoint-restore/go-criu/v7/Makefile @@ -18,9 +18,6 @@ test: build coverage: $(MAKE) -C test coverage -codecov: - $(MAKE) -C test codecov - rpc/rpc.proto: curl -sSL https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/rpc.proto -o $@ @@ -42,4 +39,4 @@ clean: $(MAKE) -C crit/ clean $(MAKE) -C test/ clean -.PHONY: build test lint vendor coverage codecov clean +.PHONY: build test lint vendor coverage clean diff --git a/vendor/github.com/checkpoint-restore/go-criu/v7/README.md b/vendor/github.com/checkpoint-restore/go-criu/v7/README.md index 832c3949a..14a08eb7c 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v7/README.md +++ b/vendor/github.com/checkpoint-restore/go-criu/v7/README.md @@ -62,7 +62,8 @@ The following table shows the relation between go-criu and criu versions: | Major version | Latest release | CRIU version | | -------------- | -------------- | ------------ | -| v7             | 7.1.0         | 3.18         | +| v7             | 7.2.0         | 3.19         | +| v7             | 7.0.0         | 3.18         | | v6             | 6.3.0         | 3.17         | | v5             | 5.3.0         | 3.16         | | v5             | 5.0.0         | 3.15         | diff --git a/vendor/github.com/checkpoint-restore/go-criu/v7/main.go b/vendor/github.com/checkpoint-restore/go-criu/v7/main.go index 6906c9811..8f29d2ee2 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v7/main.go +++ b/vendor/github.com/checkpoint-restore/go-criu/v7/main.go @@ -61,13 +61,19 @@ func (c *Criu) Prepare() error { } // Cleanup cleans up -func (c *Criu) Cleanup() { +func (c *Criu) Cleanup() error { + var errs []error if c.swrkCmd != nil { - c.swrkSk.Close() + if err := c.swrkSk.Close(); err != nil { + errs = append(errs, err) + } c.swrkSk = nil - _ = c.swrkCmd.Wait() + if err := c.swrkCmd.Wait(); err != nil { + errs = append(errs, fmt.Errorf("criu swrk failed: %w", err)) + } 
c.swrkCmd = nil } + return errors.Join(errs...) } func (c *Criu) sendAndRecv(reqB []byte) ([]byte, int, error) { @@ -99,9 +105,7 @@ func (c *Criu) doSwrk(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify) e return nil } -func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify, features *rpc.CriuFeatures) (*rpc.CriuResp, error) { - var resp *rpc.CriuResp - +func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify, features *rpc.CriuFeatures) (resp *rpc.CriuResp, retErr error) { req := rpc.CriuReq{ Type: &reqType, Opts: opts, @@ -121,7 +125,13 @@ func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy N return nil, err } - defer c.Cleanup() + defer func() { + // append any cleanup errors to the returned error + err := c.Cleanup() + if err != nil { + retErr = errors.Join(retErr, err) + } + }() } for { diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE deleted file mode 100644 index 8915f0277..000000000 --- a/vendor/github.com/containerd/containerd/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/errdefs/errdefs_deprecated.go b/vendor/github.com/containerd/containerd/errdefs/errdefs_deprecated.go deleted file mode 100644 index c6a0d843e..000000000 --- a/vendor/github.com/containerd/containerd/errdefs/errdefs_deprecated.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with fmt.Errorf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -// -// The functions ToGRPC and FromGRPC can be used to map server-side and -// client-side errors to the correct types. -package errdefs - -import ( - "github.com/containerd/errdefs" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. -// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// For the most part, we just try to provide local grpc errors. Most conditions -// map very well to those defined by grpc. 
-var ( - ErrUnknown = errdefs.ErrUnknown - ErrInvalidArgument = errdefs.ErrInvalidArgument - ErrNotFound = errdefs.ErrNotFound - ErrAlreadyExists = errdefs.ErrAlreadyExists - ErrFailedPrecondition = errdefs.ErrFailedPrecondition - ErrUnavailable = errdefs.ErrUnavailable - ErrNotImplemented = errdefs.ErrNotImplemented -) - -// IsInvalidArgument returns true if the error is due to an invalid argument -func IsInvalidArgument(err error) bool { - return errdefs.IsInvalidArgument(err) -} - -// IsNotFound returns true if the error is due to a missing object -func IsNotFound(err error) bool { - return errdefs.IsNotFound(err) -} - -// IsAlreadyExists returns true if the error is due to an already existing -// metadata item -func IsAlreadyExists(err error) bool { - return errdefs.IsAlreadyExists(err) -} - -// IsFailedPrecondition returns true if an operation could not proceed to the -// lack of a particular condition -func IsFailedPrecondition(err error) bool { - return errdefs.IsFailedPrecondition(err) -} - -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errdefs.IsUnavailable(err) -} - -// IsNotImplemented returns true if the error is due to not being implemented -func IsNotImplemented(err error) bool { - return errdefs.IsNotImplemented(err) -} - -// IsCanceled returns true if the error is due to `context.Canceled`. -func IsCanceled(err error) bool { - return errdefs.IsCanceled(err) -} - -// IsDeadlineExceeded returns true if the error is due to -// `context.DeadlineExceeded`. -func IsDeadlineExceeded(err error) bool { - return errdefs.IsDeadlineExceeded(err) -} - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. -func ToGRPC(err error) error { - return errdefs.ToGRPC(err) -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. -// -// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return errdefs.ToGRPCf(err, format, args...) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - return errdefs.FromGRPC(err) -} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go deleted file mode 100644 index 6656465ef..000000000 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package userns - -import ( - "bufio" - "fmt" - "os" - "sync" -) - -var ( - inUserNS bool - nsOnce sync.Once -) - -// RunningInUserNS detects whether we are currently running in a user namespace. -// Originally copied from github.com/lxc/lxd/shared/util.go -func RunningInUserNS() bool { - nsOnce.Do(func() { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if a == 0 && b == 0 && c == 4294967295 { - return - } - inUserNS = true - }) - return inUserNS -} diff --git a/vendor/github.com/containerd/errdefs/errors.go b/vendor/github.com/containerd/errdefs/errors.go index 876225597..f654d1964 100644 --- a/vendor/github.com/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/errdefs/errors.go @@ -21,9 +21,6 @@ // // To detect an error class, use the IsXXX functions to tell whether an error // is of a certain type. -// -// The functions ToGRPC and FromGRPC can be used to map server-side and -// client-side errors to the correct types. package errdefs import ( @@ -36,57 +33,411 @@ import ( // Packages should return errors of these types when they want to instruct a // client to take a particular action. // -// For the most part, we just try to provide local grpc errors. Most conditions -// map very well to those defined by grpc. +// These errors map closely to grpc errors. var ( - ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. - ErrInvalidArgument = errors.New("invalid argument") - ErrNotFound = errors.New("not found") - ErrAlreadyExists = errors.New("already exists") - ErrFailedPrecondition = errors.New("failed precondition") - ErrUnavailable = errors.New("unavailable") - ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented + ErrUnknown = errUnknown{} + ErrInvalidArgument = errInvalidArgument{} + ErrNotFound = errNotFound{} + ErrAlreadyExists = errAlreadyExists{} + ErrPermissionDenied = errPermissionDenied{} + ErrResourceExhausted = errResourceExhausted{} + ErrFailedPrecondition = errFailedPrecondition{} + ErrConflict = errConflict{} + ErrNotModified = errNotModified{} + ErrAborted = errAborted{} + ErrOutOfRange = errOutOfRange{} + ErrNotImplemented = errNotImplemented{} + ErrInternal = errInternal{} + ErrUnavailable = errUnavailable{} + ErrDataLoss = errDataLoss{} + ErrUnauthenticated = errUnauthorized{} ) +// cancelled maps to Moby's "ErrCancelled" +type cancelled interface { + Cancelled() +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) || isInterface[cancelled](err) +} + +type errUnknown struct{} + +func (errUnknown) Error() string { return "unknown" } + +func (errUnknown) Unknown() {} + +func (e errUnknown) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// unknown maps to Moby's "ErrUnknown" +type unknown interface { + Unknown() +} + +// IsUnknown returns true if the error is due to an unknown error, +// unhandled condition or unexpected response. 
+func IsUnknown(err error) bool {
+	return errors.Is(err, errUnknown{}) || isInterface[unknown](err)
+}
+
+type errInvalidArgument struct{}
+
+func (errInvalidArgument) Error() string { return "invalid argument" }
+
+func (errInvalidArgument) InvalidParameter() {}
+
+func (e errInvalidArgument) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// invalidParameter maps to Moby's "ErrInvalidParameter"
+type invalidParameter interface {
+	InvalidParameter()
+}
+
 // IsInvalidArgument returns true if the error is due to an invalid argument
 func IsInvalidArgument(err error) bool {
-	return errors.Is(err, ErrInvalidArgument)
+	return errors.Is(err, ErrInvalidArgument) || isInterface[invalidParameter](err)
+}
+
+// deadlineExceeded maps to Moby's "ErrDeadline"
+type deadlineExceeded interface {
+	DeadlineExceeded()
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+	return errors.Is(err, context.DeadlineExceeded) || isInterface[deadlineExceeded](err)
+}
+
+type errNotFound struct{}
+
+func (errNotFound) Error() string { return "not found" }
+
+func (errNotFound) NotFound() {}
+
+func (e errNotFound) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// notFound maps to Moby's "ErrNotFound"
+type notFound interface {
+	NotFound()
 }
 
 // IsNotFound returns true if the error is due to a missing object
 func IsNotFound(err error) bool {
-	return errors.Is(err, ErrNotFound)
+	return errors.Is(err, ErrNotFound) || isInterface[notFound](err)
+}
+
+type errAlreadyExists struct{}
+
+func (errAlreadyExists) Error() string { return "already exists" }
+
+func (errAlreadyExists) AlreadyExists() {}
+
+func (e errAlreadyExists) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+type alreadyExists interface {
+	AlreadyExists()
 }
 
 // IsAlreadyExists returns true if the error is due to an already existing
 // metadata item
 func IsAlreadyExists(err error) bool {
-	return errors.Is(err, ErrAlreadyExists)
+	return errors.Is(err, ErrAlreadyExists) || isInterface[alreadyExists](err)
+}
+
+type errPermissionDenied struct{}
+
+func (errPermissionDenied) Error() string { return "permission denied" }
+
+func (errPermissionDenied) Forbidden() {}
+
+func (e errPermissionDenied) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// forbidden maps to Moby's "ErrForbidden"
+type forbidden interface {
+	Forbidden()
+}
+
+// IsPermissionDenied returns true if the error is due to permission denied
+// or forbidden (403) response
+func IsPermissionDenied(err error) bool {
+	return errors.Is(err, ErrPermissionDenied) || isInterface[forbidden](err)
+}
+
+type errResourceExhausted struct{}
+
+func (errResourceExhausted) Error() string { return "resource exhausted" }
+
+func (errResourceExhausted) ResourceExhausted() {}
+
+func (e errResourceExhausted) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+type resourceExhausted interface {
+	ResourceExhausted()
+}
+
+// IsResourceExhausted returns true if the error is due to
+// a lack of resources or too many attempts. 
+func IsResourceExhausted(err error) bool { + return errors.Is(err, errResourceExhausted{}) || isInterface[resourceExhausted](err) +} + +type errFailedPrecondition struct{} + +func (e errFailedPrecondition) Error() string { return "failed precondition" } + +func (errFailedPrecondition) FailedPrecondition() {} + +func (e errFailedPrecondition) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type failedPrecondition interface { + FailedPrecondition() } -// IsFailedPrecondition returns true if an operation could not proceed to the -// lack of a particular condition +// IsFailedPrecondition returns true if an operation could not proceed due to +// the lack of a particular condition func IsFailedPrecondition(err error) bool { - return errors.Is(err, ErrFailedPrecondition) + return errors.Is(err, errFailedPrecondition{}) || isInterface[failedPrecondition](err) } -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errors.Is(err, ErrUnavailable) +type errConflict struct{} + +func (errConflict) Error() string { return "conflict" } + +func (errConflict) Conflict() {} + +func (e errConflict) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// conflict maps to Moby's "ErrConflict" +type conflict interface { + Conflict() +} + +// IsConflict returns true if an operation could not proceed due to +// a conflict. +func IsConflict(err error) bool { + return errors.Is(err, errConflict{}) || isInterface[conflict](err) +} + +type errNotModified struct{} + +func (errNotModified) Error() string { return "not modified" } + +func (errNotModified) NotModified() {} + +func (e errNotModified) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// notModified maps to Moby's "ErrNotModified" +type notModified interface { + NotModified() +} + +// IsNotModified returns true if an operation could not proceed due +// to an object not modified from a previous state. +func IsNotModified(err error) bool { + return errors.Is(err, errNotModified{}) || isInterface[notModified](err) +} + +type errAborted struct{} + +func (errAborted) Error() string { return "aborted" } + +func (errAborted) Aborted() {} + +func (e errAborted) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type aborted interface { + Aborted() +} + +// IsAborted returns true if an operation was aborted. +func IsAborted(err error) bool { + return errors.Is(err, errAborted{}) || isInterface[aborted](err) +} + +type errOutOfRange struct{} + +func (errOutOfRange) Error() string { return "out of range" } + +func (errOutOfRange) OutOfRange() {} + +func (e errOutOfRange) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type outOfRange interface { + OutOfRange() +} + +// IsOutOfRange returns true if an operation could not proceed due +// to data being out of the expected range. 
+func IsOutOfRange(err error) bool {
+	return errors.Is(err, errOutOfRange{}) || isInterface[outOfRange](err)
+}
+
+type errNotImplemented struct{}
+
+func (errNotImplemented) Error() string { return "not implemented" }
+
+func (errNotImplemented) NotImplemented() {}
+
+func (e errNotImplemented) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// notImplemented maps to Moby's "ErrNotImplemented"
+type notImplemented interface {
+	NotImplemented()
 }
 
 // IsNotImplemented returns true if the error is due to not being implemented
 func IsNotImplemented(err error) bool {
-	return errors.Is(err, ErrNotImplemented)
+	return errors.Is(err, errNotImplemented{}) || isInterface[notImplemented](err)
 }
 
-// IsCanceled returns true if the error is due to `context.Canceled`.
-func IsCanceled(err error) bool {
-	return errors.Is(err, context.Canceled)
+type errInternal struct{}
+
+func (errInternal) Error() string { return "internal" }
+
+func (errInternal) System() {}
+
+func (e errInternal) WithMessage(msg string) error {
+	return customMessage{e, msg}
 }
 
-// IsDeadlineExceeded returns true if the error is due to
-// `context.DeadlineExceeded`.
-func IsDeadlineExceeded(err error) bool {
-	return errors.Is(err, context.DeadlineExceeded)
+// system maps to Moby's "ErrSystem"
+type system interface {
+	System()
+}
+
+// IsInternal returns true if the error is due to an internal or system error
+func IsInternal(err error) bool {
+	return errors.Is(err, errInternal{}) || isInterface[system](err)
+}
+
+type errUnavailable struct{}
+
+func (errUnavailable) Error() string { return "unavailable" }
+
+func (errUnavailable) Unavailable() {}
+
+func (e errUnavailable) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// unavailable maps to Moby's "ErrUnavailable"
+type unavailable interface {
+	Unavailable()
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Is(err, errUnavailable{}) || isInterface[unavailable](err)
+}
+
+type errDataLoss struct{}
+
+func (errDataLoss) Error() string { return "data loss" }
+
+func (errDataLoss) DataLoss() {}
+
+func (e errDataLoss) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// dataLoss maps to Moby's "ErrDataLoss"
+type dataLoss interface {
+	DataLoss()
+}
+
+// IsDataLoss returns true if data during an operation was lost or corrupted
+func IsDataLoss(err error) bool {
+	return errors.Is(err, errDataLoss{}) || isInterface[dataLoss](err)
+}
+
+type errUnauthorized struct{}
+
+func (errUnauthorized) Error() string { return "unauthorized" }
+
+func (errUnauthorized) Unauthorized() {}
+
+func (e errUnauthorized) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// unauthorized maps to Moby's "ErrUnauthorized"
+type unauthorized interface {
+	Unauthorized()
+}
+
+// IsUnauthorized returns true if the error indicates that the user was
+// unauthenticated or unauthorized. 
+func IsUnauthorized(err error) bool { + return errors.Is(err, errUnauthorized{}) || isInterface[unauthorized](err) +} + +func isInterface[T any](err error) bool { + for { + switch x := err.(type) { + case T: + return true + case customMessage: + err = x.err + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return false + } + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + if isInterface[T](err) { + return true + } + } + return false + default: + return false + } + } +} + +// customMessage is used to provide a defined error with a custom message. +// The message is not wrapped but can be compared by the `Is(error) bool` interface. +type customMessage struct { + err error + msg string +} + +func (c customMessage) Is(err error) bool { + return c.err == err +} + +func (c customMessage) As(target any) bool { + return errors.As(c.err, target) +} + +func (c customMessage) Error() string { + return c.msg } diff --git a/vendor/github.com/containerd/errdefs/grpc.go b/vendor/github.com/containerd/errdefs/grpc.go deleted file mode 100644 index 7a9b33e05..000000000 --- a/vendor/github.com/containerd/errdefs/grpc.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import ( - "context" - "fmt" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. -func ToGRPC(err error) error { - if err == nil { - return nil - } - - if isGRPCError(err) { - // error has already been mapped to grpc - return err - } - - switch { - case IsInvalidArgument(err): - return status.Errorf(codes.InvalidArgument, err.Error()) - case IsNotFound(err): - return status.Errorf(codes.NotFound, err.Error()) - case IsAlreadyExists(err): - return status.Errorf(codes.AlreadyExists, err.Error()) - case IsFailedPrecondition(err): - return status.Errorf(codes.FailedPrecondition, err.Error()) - case IsUnavailable(err): - return status.Errorf(codes.Unavailable, err.Error()) - case IsNotImplemented(err): - return status.Errorf(codes.Unimplemented, err.Error()) - case IsCanceled(err): - return status.Errorf(codes.Canceled, err.Error()) - case IsDeadlineExceeded(err): - return status.Errorf(codes.DeadlineExceeded, err.Error()) - } - - return err -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. 
-// -// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - if err == nil { - return nil - } - - var cls error // divide these into error classes, becomes the cause - - switch code(err) { - case codes.InvalidArgument: - cls = ErrInvalidArgument - case codes.AlreadyExists: - cls = ErrAlreadyExists - case codes.NotFound: - cls = ErrNotFound - case codes.Unavailable: - cls = ErrUnavailable - case codes.FailedPrecondition: - cls = ErrFailedPrecondition - case codes.Unimplemented: - cls = ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - default: - cls = ErrUnknown - } - - msg := rebaseMessage(cls, err) - if msg != "" { - err = fmt.Errorf("%s: %w", msg, cls) - } else { - err = cls - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. -func rebaseMessage(cls error, err error) string { - desc := errDesc(err) - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} - -func isGRPCError(err error) bool { - _, ok := status.FromError(err) - return ok -} - -func code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -func errDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/errdefs/pkg/LICENSE similarity index 100% rename from vendor/github.com/containerd/containerd/LICENSE rename to vendor/github.com/containerd/errdefs/pkg/LICENSE diff --git a/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go b/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go new file mode 100644 index 000000000..59577595a --- /dev/null +++ b/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go @@ -0,0 +1,353 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errgrpc provides utility functions for translating errors to +// and from a gRPC context. +// +// The functions ToGRPC and ToNative can be used to map server-side and +// client-side errors to the correct types. 
+package errgrpc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/protoadapt"
+	"google.golang.org/protobuf/types/known/anypb"
+
+	"github.com/containerd/typeurl/v2"
+
+	"github.com/containerd/errdefs"
+	"github.com/containerd/errdefs/pkg/internal/cause"
+	"github.com/containerd/errdefs/pkg/internal/types"
+)
+
+// ToGRPC will attempt to map the error into a grpc error, from the error types
+// defined in the errdefs package, attempting to preserve the original
+// description. Any type which does not resolve to a defined error type will
+// be assigned the unknown error code.
+//
+// Further information may be extracted from certain errors depending on their
+// type. The grpc error details will be used to attempt to preserve as much of
+// the error structures and types as possible.
+//
+// Errors which can be marshaled using protobuf or typeurl will be considered
+// for inclusion as GRPC error details.
+// Additionally, use the following interfaces in errors to preserve custom types:
+//
+//	WrapError(error) error - Used to wrap the previous error
+//	JoinErrors(...error) error - Used to join all previous errors
+//	CollapseError() - Used for errors which carry information but
+//	should not have their error message shown.
+func ToGRPC(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	if _, ok := status.FromError(err); ok {
+		// error has already been mapped to grpc
+		return err
+	}
+	st := statusFromError(err)
+	if st != nil {
+		if details := errorDetails(err, false); len(details) > 0 {
+			if ds, _ := st.WithDetails(details...); ds != nil {
+				st = ds
+			}
+		}
+		err = st.Err()
+	}
+	return err
+}
+
+func statusFromError(err error) *status.Status {
+	switch errdefs.Resolve(err) {
+	case errdefs.ErrInvalidArgument:
+		return status.New(codes.InvalidArgument, err.Error())
+	case errdefs.ErrNotFound:
+		return status.New(codes.NotFound, err.Error())
+	case errdefs.ErrAlreadyExists:
+		return status.New(codes.AlreadyExists, err.Error())
+	case errdefs.ErrPermissionDenied:
+		return status.New(codes.PermissionDenied, err.Error())
+	case errdefs.ErrResourceExhausted:
+		return status.New(codes.ResourceExhausted, err.Error())
+	case errdefs.ErrFailedPrecondition, errdefs.ErrConflict, errdefs.ErrNotModified:
+		return status.New(codes.FailedPrecondition, err.Error())
+	case errdefs.ErrAborted:
+		return status.New(codes.Aborted, err.Error())
+	case errdefs.ErrOutOfRange:
+		return status.New(codes.OutOfRange, err.Error())
+	case errdefs.ErrNotImplemented:
+		return status.New(codes.Unimplemented, err.Error())
+	case errdefs.ErrInternal:
+		return status.New(codes.Internal, err.Error())
+	case errdefs.ErrUnavailable:
+		return status.New(codes.Unavailable, err.Error())
+	case errdefs.ErrDataLoss:
+		return status.New(codes.DataLoss, err.Error())
+	case errdefs.ErrUnauthenticated:
+		return status.New(codes.Unauthenticated, err.Error())
+	case context.DeadlineExceeded:
+		return status.New(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.New(codes.Canceled, err.Error())
+	case errdefs.ErrUnknown:
+		return status.New(codes.Unknown, err.Error())
+	}
+	return nil
+}
+
+// errorDetails returns an array of errors which make up the provided error. 
+// If firstIncluded is true, then all encodable errors will be used, otherwise
+// the first error in an error list will not be used, to account for the
+// base status error to which details are added via wrap or join.
+//
+// The errors are ordered in a way that they can be applied in order by either
+// wrapping or joining the errors to recreate an error with the same structure
+// when `WrapError` and `JoinErrors` interfaces are used.
+//
+// The intent is that when re-applying the errors to create a single error, the
+// results of calls to `Error()`, `errors.Is`, `errors.As`, and "%+v" formatting
+// are the same as the original error.
+func errorDetails(err error, firstIncluded bool) []protoadapt.MessageV1 {
+	switch uerr := err.(type) {
+	case interface{ Unwrap() error }:
+		details := errorDetails(uerr.Unwrap(), firstIncluded)
+
+		// If the type is able to wrap, then include if proto
+		if _, ok := err.(interface{ WrapError(error) error }); ok {
+			// Get proto message
+			if protoErr := toProtoMessage(err); protoErr != nil {
+				details = append(details, protoErr)
+			}
+		}
+
+		return details
+	case interface{ Unwrap() []error }:
+		var details []protoadapt.MessageV1
+		for i, e := range uerr.Unwrap() {
+			details = append(details, errorDetails(e, firstIncluded || i > 0)...)
+		}
+
+		if _, ok := err.(interface{ JoinErrors(...error) error }); ok {
+			// Get proto message
+			if protoErr := toProtoMessage(err); protoErr != nil {
+				details = append(details, protoErr)
+			}
+		}
+		return details
+	}
+
+	if firstIncluded {
+		if protoErr := toProtoMessage(err); protoErr != nil {
+			return []protoadapt.MessageV1{protoErr}
+		}
+		if gs, ok := status.FromError(ToGRPC(err)); ok {
+			return []protoadapt.MessageV1{gs.Proto()}
+		}
+		// TODO: Else include unknown extra error type?
+	}
+
+	return nil
+}
+
+func toProtoMessage(err error) protoadapt.MessageV1 {
+	// Do not double encode proto messages, otherwise use Any
+	if pm, ok := err.(protoadapt.MessageV1); ok {
+		return pm
+	}
+	if pm, ok := err.(proto.Message); ok {
+		return protoadapt.MessageV1Of(pm)
+	}
+
+	if reflect.TypeOf(err).Kind() == reflect.Ptr {
+		a, aerr := typeurl.MarshalAny(err)
+		if aerr == nil {
+			return &anypb.Any{
+				TypeUrl: a.GetTypeUrl(),
+				Value:   a.GetValue(),
+			}
+		}
+	}
+	return nil
+}
+
+// ToGRPCf maps the error to grpc error codes, assembling the formatting string
+// and combining it with the target error string.
+//
+// This is equivalent to errgrpc.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
+func ToGRPCf(err error, format string, args ...interface{}) error {
+	return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
+}
+
+// ToNative returns the underlying error from a grpc service based on the grpc
+// error code. The grpc details are used to wrap the error in more context
+// or support multiple errors. 
+func ToNative(err error) error { + if err == nil { + return nil + } + + s, isGRPC := status.FromError(err) + + var ( + desc string + code codes.Code + ) + + if isGRPC { + desc = s.Message() + code = s.Code() + } else { + desc = err.Error() + code = codes.Unknown + } + + var cls error // divide these into error classes, becomes the cause + + switch code { + case codes.InvalidArgument: + cls = errdefs.ErrInvalidArgument + case codes.AlreadyExists: + cls = errdefs.ErrAlreadyExists + case codes.NotFound: + cls = errdefs.ErrNotFound + case codes.Unavailable: + cls = errdefs.ErrUnavailable + case codes.FailedPrecondition: + // TODO: Has suffix is not sufficient for conflict and not modified + // Message should start with ": " or be at beginning of a line + // Message should end with ": " or be at the end of a line + // Compile a regex + if desc == errdefs.ErrConflict.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrConflict.Error()) { + cls = errdefs.ErrConflict + } else if desc == errdefs.ErrNotModified.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrNotModified.Error()) { + cls = errdefs.ErrNotModified + } else { + cls = errdefs.ErrFailedPrecondition + } + case codes.Unimplemented: + cls = errdefs.ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + case codes.Aborted: + cls = errdefs.ErrAborted + case codes.Unauthenticated: + cls = errdefs.ErrUnauthenticated + case codes.PermissionDenied: + cls = errdefs.ErrPermissionDenied + case codes.Internal: + cls = errdefs.ErrInternal + case codes.DataLoss: + cls = errdefs.ErrDataLoss + case codes.OutOfRange: + cls = errdefs.ErrOutOfRange + case codes.ResourceExhausted: + cls = errdefs.ErrResourceExhausted + default: + if idx := strings.LastIndex(desc, cause.UnexpectedStatusPrefix); idx > 0 { + if status, uerr := strconv.Atoi(desc[idx+len(cause.UnexpectedStatusPrefix):]); uerr == nil && status >= 200 && status < 600 { + cls = cause.ErrUnexpectedStatus{Status: status} + } + } + if cls == nil { + cls = errdefs.ErrUnknown + } + } + + msg := rebaseMessage(cls, desc) + if msg == "" { + err = cls + } else if msg != desc { + err = fmt.Errorf("%s: %w", msg, cls) + } else if wm, ok := cls.(interface{ WithMessage(string) error }); ok { + err = wm.WithMessage(msg) + } else { + err = fmt.Errorf("%s: %w", msg, cls) + } + + if isGRPC { + errs := []error{err} + for _, a := range s.Details() { + var derr error + + // First decode error if needed + if s, ok := a.(*spb.Status); ok { + derr = ToNative(status.ErrorProto(s)) + } else if e, ok := a.(error); ok { + derr = e + } else if dany, ok := a.(typeurl.Any); ok { + i, uerr := typeurl.UnmarshalAny(dany) + if uerr == nil { + if e, ok = i.(error); ok { + derr = e + } else { + derr = fmt.Errorf("non-error unmarshalled detail: %v", i) + } + } else { + derr = fmt.Errorf("error of type %q with failure to unmarshal: %v", dany.GetTypeUrl(), uerr) + } + } else { + derr = fmt.Errorf("non-error detail: %v", a) + } + + switch werr := derr.(type) { + case interface{ WrapError(error) error }: + errs[len(errs)-1] = werr.WrapError(errs[len(errs)-1]) + case interface{ JoinErrors(...error) error }: + // TODO: Consider whether this should support joining a subset + errs[0] = werr.JoinErrors(errs...) + case interface{ CollapseError() }: + errs[len(errs)-1] = types.CollapsedError(errs[len(errs)-1], derr) + default: + errs = append(errs, derr) + } + + } + if len(errs) > 1 { + err = errors.Join(errs...) 
+ } else { + err = errs[0] + } + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, desc string) string { + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go similarity index 60% rename from vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go rename to vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go index c67f773d0..d88756bb0 100644 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ b/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go @@ -1,5 +1,3 @@ -//go:build !linux - /* Copyright The containerd Authors. @@ -16,10 +14,20 @@ limitations under the License. */ -package userns +// Package cause is used to define root causes for errors +// common to errors packages like grpc and http. +package cause + +import "fmt" + +type ErrUnexpectedStatus struct { + Status int +} + +const UnexpectedStatusPrefix = "unexpected status " -// RunningInUserNS is a stub for non-Linux systems -// Always returns false -func RunningInUserNS() bool { - return false +func (e ErrUnexpectedStatus) Error() string { + return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status) } + +func (ErrUnexpectedStatus) Unknown() {} diff --git a/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go b/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go new file mode 100644 index 000000000..a37e7722a --- /dev/null +++ b/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go @@ -0,0 +1,57 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import "fmt" + +// CollapsibleError indicates the error should be collapsed +type CollapsibleError interface { + CollapseError() +} + +// CollapsedError returns a new error with the collapsed +// error returned on unwrapped or when formatted with "%+v" +func CollapsedError(err error, collapsed ...error) error { + return collapsedError{err, collapsed} +} + +type collapsedError struct { + error + collapsed []error +} + +func (c collapsedError) Unwrap() []error { + return append([]error{c.error}, c.collapsed...) 
+} + +func (c collapsedError) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", c.error) + for _, err := range c.collapsed { + fmt.Fprintf(s, "\n%+v", err) + } + return + } + fallthrough + case 's': + fmt.Fprint(s, c.Error()) + case 'q': + fmt.Fprintf(s, "%q", c.Error()) + } +} diff --git a/vendor/github.com/containerd/errdefs/resolve.go b/vendor/github.com/containerd/errdefs/resolve.go new file mode 100644 index 000000000..c02d4a73f --- /dev/null +++ b/vendor/github.com/containerd/errdefs/resolve.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import "context" + +// Resolve returns the first error found in the error chain which matches an +// error defined in this package or context error. A raw, unwrapped error is +// returned or ErrUnknown if no matching error is found. +// +// This is useful for determining a response code based on the outermost wrapped +// error rather than the original cause. For example, a not found error deep +// in the code may be wrapped as an invalid argument. When determining status +// code from Is* functions, the depth or ordering of the error is not +// considered. +// +// The search order is depth first, a wrapped error returned from any part of +// the chain from `Unwrap() error` will be returned before any joined errors +// as returned by `Unwrap() []error`. 
+func Resolve(err error) error { + if err == nil { + return nil + } + err = firstError(err) + if err == nil { + err = ErrUnknown + } + return err +} + +func firstError(err error) error { + for { + switch err { + case ErrUnknown, + ErrInvalidArgument, + ErrNotFound, + ErrAlreadyExists, + ErrPermissionDenied, + ErrResourceExhausted, + ErrFailedPrecondition, + ErrConflict, + ErrNotModified, + ErrAborted, + ErrOutOfRange, + ErrNotImplemented, + ErrInternal, + ErrUnavailable, + ErrDataLoss, + ErrUnauthenticated, + context.DeadlineExceeded, + context.Canceled: + return err + } + switch e := err.(type) { + case customMessage: + err = e.err + case unknown: + return ErrUnknown + case invalidParameter: + return ErrInvalidArgument + case notFound: + return ErrNotFound + case alreadyExists: + return ErrAlreadyExists + case forbidden: + return ErrPermissionDenied + case resourceExhausted: + return ErrResourceExhausted + case failedPrecondition: + return ErrFailedPrecondition + case conflict: + return ErrConflict + case notModified: + return ErrNotModified + case aborted: + return ErrAborted + case errOutOfRange: + return ErrOutOfRange + case notImplemented: + return ErrNotImplemented + case system: + return ErrInternal + case unavailable: + return ErrUnavailable + case dataLoss: + return ErrDataLoss + case unauthorized: + return ErrUnauthenticated + case deadlineExceeded: + return context.DeadlineExceeded + case cancelled: + return context.Canceled + case interface{ Unwrap() error }: + err = e.Unwrap() + if err == nil { + return nil + } + case interface{ Unwrap() []error }: + for _, ue := range e.Unwrap() { + if fe := firstError(ue); fe != nil { + return fe + } + } + return nil + case interface{ Is(error) bool }: + for _, target := range []error{ErrUnknown, + ErrInvalidArgument, + ErrNotFound, + ErrAlreadyExists, + ErrPermissionDenied, + ErrResourceExhausted, + ErrFailedPrecondition, + ErrConflict, + ErrNotModified, + ErrAborted, + ErrOutOfRange, + ErrNotImplemented, + ErrInternal, + ErrUnavailable, + ErrDataLoss, + ErrUnauthenticated, + context.DeadlineExceeded, + context.Canceled} { + if e.Is(target) { + return target + } + } + return nil + default: + return nil + } + } +} diff --git a/vendor/github.com/containerd/platforms/.gitattributes b/vendor/github.com/containerd/platforms/.gitattributes new file mode 100644 index 000000000..a0717e4b3 --- /dev/null +++ b/vendor/github.com/containerd/platforms/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/containerd/platforms/.golangci.yml b/vendor/github.com/containerd/platforms/.golangci.yml new file mode 100644 index 000000000..a695775df --- /dev/null +++ b/vendor/github.com/containerd/platforms/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - exportloopref # Checks for pointers to enclosing loop variables + - gofmt + - goimports + - gosec + - ineffassign + - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code + disable: + - errcheck + +run: + timeout: 5m + skip-dirs: + - api + - cluster + - design + - docs + - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/platforms/LICENSE b/vendor/github.com/containerd/platforms/LICENSE new file mode 100644 index 000000000..584149b6e --- /dev/null +++ b/vendor/github.com/containerd/platforms/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 
2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
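For context on the new `errdefs.Resolve` helper added in resolve.go above: it walks an error chain depth-first and returns the first matching sentinel (or `ErrUnknown` when nothing matches), so a transport layer can map a deeply wrapped cause back to a single status code. A minimal usage sketch, assuming only the vendored `github.com/containerd/errdefs` package; the wrapping messages are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// A not-found cause wrapped twice; Resolve walks the chain and
	// returns the raw sentinel rather than the outer wrapped error.
	err := fmt.Errorf("pulling image: %w",
		fmt.Errorf("reading manifest: %w", errdefs.ErrNotFound))

	fmt.Println(errdefs.Resolve(err) == errdefs.ErrNotFound) // true
	fmt.Println(errdefs.IsNotFound(err))                     // true
}
```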
diff --git a/vendor/github.com/containerd/platforms/README.md b/vendor/github.com/containerd/platforms/README.md new file mode 100644 index 000000000..2059de771 --- /dev/null +++ b/vendor/github.com/containerd/platforms/README.md @@ -0,0 +1,32 @@ +# platforms + +A Go package for formatting, normalizing and matching container platforms. + +This package is based on the Open Containers Image Spec definition of a [platform](https://github.com/opencontainers/image-spec/blob/main/specs-go/v1/descriptor.go#L52). + +## Platform Specifier + +While the OCI platform specifications provide a tool for components to +specify structured information, user input typically doesn't need the full +context and much can be inferred. To solve this problem, this package introduces +"specifiers". A specifier has the format +`<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the +operating system or the architecture or both. + +An example of a common specifier is `linux/amd64`. If the host has a default +runtime that matches this, the user can simply provide the component that +matters. For example, if an image provides `amd64` and `arm64` support, the +operating system, `linux` can be inferred, so they only have to provide +`arm64` or `amd64`. Similar behavior is implemented for operating systems, +where the architecture may be known but a runtime may support images from +different operating systems. + +## Project details + +**platforms** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. 
\ No newline at end of file diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/platforms/compare.go similarity index 100% rename from vendor/github.com/containerd/containerd/platforms/compare.go rename to vendor/github.com/containerd/platforms/compare.go diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/platforms/cpuinfo.go similarity index 100% rename from vendor/github.com/containerd/containerd/platforms/cpuinfo.go rename to vendor/github.com/containerd/platforms/cpuinfo.go diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/platforms/cpuinfo_linux.go similarity index 94% rename from vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go rename to vendor/github.com/containerd/platforms/cpuinfo_linux.go index e07aa99cc..98c7001f9 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go +++ b/vendor/github.com/containerd/platforms/cpuinfo_linux.go @@ -19,12 +19,12 @@ package platforms import ( "bufio" "bytes" + "errors" "fmt" "os" "runtime" "strings" - "github.com/containerd/errdefs" "golang.org/x/sys/unix" ) @@ -70,7 +70,7 @@ func getCPUInfo(pattern string) (info string, err error) { return "", err } - return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errdefs.ErrNotFound) + return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errNotFound) } // getCPUVariantFromArch get CPU variant from arch through a system call @@ -83,7 +83,7 @@ func getCPUVariantFromArch(arch string) (string, error) { if arch == "aarch64" { variant = "8" } else if arch[0:4] == "armv" && len(arch) >= 5 { - //Valid arch format is in form of armvXx + // Valid arch format is in form of armvXx switch arch[3:5] { case "v8": variant = "8" @@ -101,7 +101,7 @@ func getCPUVariantFromArch(arch string) (string, error) { variant = "unknown" } } else { - return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errdefs.ErrInvalidArgument) + return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errInvalidArgument) } return variant, nil } @@ -112,11 +112,10 @@ func getCPUVariantFromArch(arch string) (string, error) { // This is to cover running ARM in emulated environment on x86 host as this field in /proc/cpuinfo // was not present. 
func getCPUVariant() (string, error) { - variant, err := getCPUInfo("Cpu architecture") if err != nil { - if errdefs.IsNotFound(err) { - //Let's try getting CPU variant from machine architecture + if errors.Is(err, errNotFound) { + // Let's try getting CPU variant from machine architecture arch, err := getMachineArch() if err != nil { return "", fmt.Errorf("failure getting machine architecture: %v", err) diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/platforms/cpuinfo_other.go similarity index 95% rename from vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go rename to vendor/github.com/containerd/platforms/cpuinfo_other.go index 8cbcbb24a..97a1fe8a3 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go +++ b/vendor/github.com/containerd/platforms/cpuinfo_other.go @@ -21,8 +21,6 @@ package platforms import ( "fmt" "runtime" - - "github.com/containerd/errdefs" ) func getCPUVariant() (string, error) { @@ -49,10 +47,8 @@ func getCPUVariant() (string, error) { default: variant = "unknown" } - } else { - return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errdefs.ErrNotImplemented) - + return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errNotImplemented) } return variant, nil diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/platforms/database.go similarity index 100% rename from vendor/github.com/containerd/containerd/platforms/database.go rename to vendor/github.com/containerd/platforms/database.go diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/platforms/defaults.go similarity index 81% rename from vendor/github.com/containerd/containerd/platforms/defaults.go rename to vendor/github.com/containerd/platforms/defaults.go index cfa3ff34a..9d898d60e 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ b/vendor/github.com/containerd/platforms/defaults.go @@ -16,9 +16,11 @@ package platforms -// DefaultString returns the default string specifier for the platform. +// DefaultString returns the default string specifier for the platform, +// with [PR#6](https://github.com/containerd/platforms/pull/6) the result +// may now also include the OSVersion from the provided platform specification. func DefaultString() string { - return Format(DefaultSpec()) + return FormatAll(DefaultSpec()) } // DefaultStrict returns strict form of Default. 
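Since `DefaultString` now routes through `FormatAll`, OS version information survives formatting. A short sketch of the new `os(version)` specifier syntax against the vendored `github.com/containerd/platforms` package; the Windows build number is just an example value:

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// Specifiers may now carry an OS version in parentheses.
	p, err := platforms.Parse("windows(10.0.17763)/amd64")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.OS, p.OSVersion, p.Architecture) // windows 10.0.17763 amd64

	// FormatAll keeps the OSVersion in the specifier; Format drops it.
	fmt.Println(platforms.FormatAll(p)) // windows(10.0.17763)/amd64
	fmt.Println(platforms.Format(p))    // windows/amd64
}
```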
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/platforms/defaults_darwin.go similarity index 100% rename from vendor/github.com/containerd/containerd/platforms/defaults_darwin.go rename to vendor/github.com/containerd/platforms/defaults_darwin.go diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/platforms/defaults_freebsd.go similarity index 100% rename from vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go rename to vendor/github.com/containerd/platforms/defaults_freebsd.go diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/platforms/defaults_unix.go similarity index 100% rename from vendor/github.com/containerd/containerd/platforms/defaults_unix.go rename to vendor/github.com/containerd/platforms/defaults_unix.go diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/platforms/defaults_windows.go similarity index 88% rename from vendor/github.com/containerd/containerd/platforms/defaults_windows.go rename to vendor/github.com/containerd/platforms/defaults_windows.go index d10fa9012..427ed72eb 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/platforms/defaults_windows.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "github.com/Microsoft/hcsshim/osversion" specs "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/windows" ) @@ -52,29 +51,29 @@ func (m windowsmatcher) Match(p specs.Platform) bool { if match && m.OS == "windows" { // HPC containers do not have OS version filled - if p.OSVersion == "" { + if m.OSVersion == "" || p.OSVersion == "" { return true } - hostOsVersion := GetOsVersion(m.osVersionPrefix) - ctrOsVersion := GetOsVersion(p.OSVersion) - return osversion.CheckHostAndContainerCompat(hostOsVersion, ctrOsVersion) + hostOsVersion := getOSVersion(m.osVersionPrefix) + ctrOsVersion := getOSVersion(p.OSVersion) + return checkHostAndContainerCompat(hostOsVersion, ctrOsVersion) } return match } -func GetOsVersion(osVersionPrefix string) osversion.OSVersion { +func getOSVersion(osVersionPrefix string) osVersion { parts := strings.Split(osVersionPrefix, ".") if len(parts) < 3 { - return osversion.OSVersion{} + return osVersion{} } majorVersion, _ := strconv.Atoi(parts[0]) minorVersion, _ := strconv.Atoi(parts[1]) buildNumber, _ := strconv.Atoi(parts[2]) - return osversion.OSVersion{ + return osVersion{ MajorVersion: uint8(majorVersion), MinorVersion: uint8(minorVersion), Build: uint16(buildNumber), diff --git a/vendor/github.com/containerd/platforms/errors.go b/vendor/github.com/containerd/platforms/errors.go new file mode 100644 index 000000000..5ad721e77 --- /dev/null +++ b/vendor/github.com/containerd/platforms/errors.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import "errors" + +// These errors mirror the errors defined in [github.com/containerd/containerd/errdefs], +// however, they are not exported as they are not expected to be used as sentinel +// errors by consumers of this package. +// +//nolint:unused // not all errors are used on all platforms. +var ( + errNotFound = errors.New("not found") + errInvalidArgument = errors.New("invalid argument") + errNotImplemented = errors.New("not implemented") +) diff --git a/vendor/github.com/containerd/platforms/platform_compat_windows.go b/vendor/github.com/containerd/platforms/platform_compat_windows.go new file mode 100644 index 000000000..89e66f0c0 --- /dev/null +++ b/vendor/github.com/containerd/platforms/platform_compat_windows.go @@ -0,0 +1,78 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// osVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type osVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// Windows Client and Server build numbers. +// +// See: +// https://learn.microsoft.com/en-us/windows/release-health/release-information +// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info +// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information +const ( + // rs5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + rs5 = 17763 + + // v21H2Server corresponds to Windows Server 2022 (ltsc2022). + v21H2Server = 20348 + + // v22H2Win11 corresponds to Windows 11 (2022 Update). + v22H2Win11 = 22621 +) + +// List of stable ABI compliant ltsc releases +// Note: List must be sorted in ascending order +var compatLTSCReleases = []uint16{ + v21H2Server, +} + +// CheckHostAndContainerCompat checks if given host and container +// OS versions are compatible. +// It includes support for stable ABI compliant versions as well. +// Every release after WS 2022 will support the previous ltsc +// container image. Stable ABI is in preview mode for windows 11 client. 
+// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility +func checkHostAndContainerCompat(host, ctr osVersion) bool { + // check major minor versions of host and guest + if host.MajorVersion != ctr.MajorVersion || + host.MinorVersion != ctr.MinorVersion { + return false + } + + // If host is < WS 2022, exact version match is required + if host.Build < v21H2Server { + return host.Build == ctr.Build + } + + var supportedLtscRelease uint16 + for i := len(compatLTSCReleases) - 1; i >= 0; i-- { + if host.Build >= compatLTSCReleases[i] { + supportedLtscRelease = compatLTSCReleases[i] + break + } + } + return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/platforms/platforms.go similarity index 74% rename from vendor/github.com/containerd/containerd/platforms/platforms.go rename to vendor/github.com/containerd/platforms/platforms.go index 44bc24a5c..1bbbdb91d 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/platforms/platforms.go @@ -102,6 +102,9 @@ // unless it is explicitly provided. This is treated as equivalent to armhf. A // previous architecture, armel, will be normalized to arm/v6. // +// Similarly, the most common arm64 version v8, and most common amd64 version v1 +// are represented without the variant. +// // While these normalizations are provided, their support on arm platforms has // not yet been fully implemented and tested. package platforms @@ -115,14 +118,15 @@ import ( "strings" specs "github.com/opencontainers/image-spec/specs-go/v1" - - "github.com/containerd/errdefs" ) var ( - specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) + specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) + osAndVersionRe = regexp.MustCompile(`^([A-Za-z0-9_-]+)(?:\(([A-Za-z0-9_.-]*)\))?$`) ) +const osAndVersionFormat = "%s(%s)" + // Platform is a type alias for convenience, so there is no need to import image-spec package everywhere. type Platform = specs.Platform @@ -155,40 +159,68 @@ func (m *matcher) Match(platform specs.Platform) bool { } func (m *matcher) String() string { - return Format(m.Platform) + return FormatAll(m.Platform) +} + +// ParseAll parses a list of platform specifiers into a list of platform. +func ParseAll(specifiers []string) ([]specs.Platform, error) { + platforms := make([]specs.Platform, len(specifiers)) + for i, s := range specifiers { + p, err := Parse(s) + if err != nil { + return nil, fmt.Errorf("invalid platform %s: %w", s, err) + } + platforms[i] = p + } + return platforms, nil } // Parse parses the platform specifier syntax into a platform declaration. // -// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`. +// Platform specifiers are in the format `<os>[(<OSVersion>)]|<arch>|<os>[(<OSVersion>)]/<arch>[/<variant>]`. // The minimum required information for a platform specifier is the operating -// system or architecture. If there is only a single string (no slashes), the +// system or architecture. The OSVersion can be part of the OS like `windows(10.0.17763)` +// When an OSVersion is specified, then specs.Platform.OSVersion is populated with that value, +// and an empty string otherwise. +// If there is only a single string (no slashes), the // value will be matched against the known set of operating systems, then fall // back to the known set of architectures. 
The missing component will be // inferred based on the local environment. func Parse(specifier string) (specs.Platform, error) { if strings.Contains(specifier, "*") { // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errInvalidArgument) } - parts := strings.Split(specifier, "/") + // Limit to 4 elements to prevent unbounded split + parts := strings.SplitN(specifier, "/", 4) - for _, part := range parts { - if !specifierRe.MatchString(part) { - return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) + var p specs.Platform + for i, part := range parts { + if i == 0 { + // First element is <os>[(<OSVersion>)] + osVer := osAndVersionRe.FindStringSubmatch(part) + if osVer == nil { + return specs.Platform{}, fmt.Errorf("%q is an invalid OS component of %q: OSAndVersion specifier component must match %q: %w", part, specifier, osAndVersionRe.String(), errInvalidArgument) + } + + p.OS = normalizeOS(osVer[1]) + p.OSVersion = osVer[2] + } else { + if !specifierRe.MatchString(part) { + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errInvalidArgument) + } + } } - var p specs.Platform switch len(parts) { case 1: - // in this case, we will test that the value might be an OS, then look - // it up. If it is not known, we'll treat it as an architecture. Since + // in this case, we will test that the value might be an OS (with or + // without the optional OSVersion specified) and look it up. + // If it is not known, we'll treat it as an architecture. Since // we have very little information about the platform here, we are // going to be a little more strict if we don't know about the argument // value. - p.OS = normalizeOS(parts[0]) if isKnownOS(p.OS) { // picks a default architecture p.Architecture = runtime.GOARCH @@ -196,10 +228,6 @@ func Parse(specifier string) (specs.Platform, error) { p.Variant = cpuVariant() } - if p.OS == "windows" { - p.OSVersion = GetWindowsOsVersion() - } - return p, nil } @@ -212,37 +240,27 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errInvalidArgument) case 2: - // In this case, we treat as a regular os/arch pair. We don't care + // In this case, we treat as a regular OS[(OSVersion)]/arch pair. We don't care // about whether or not we know of the platform. 
- p.OS = normalizeOS(parts[0]) p.Architecture, p.Variant = normalizeArch(parts[1], "") if p.Architecture == "arm" && p.Variant == "v7" { p.Variant = "" } - if p.OS == "windows" { - p.OSVersion = GetWindowsOsVersion() - } - return p, nil case 3: // we have a fully specified variant, this is rare - p.OS = normalizeOS(parts[0]) p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) if p.Architecture == "arm64" && p.Variant == "" { p.Variant = "v8" } - if p.OS == "windows" { - p.OSVersion = GetWindowsOsVersion() - } - return p, nil } - return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errInvalidArgument) } // MustParse is like Parses but panics if the specifier cannot be parsed. @@ -264,6 +282,20 @@ func Format(platform specs.Platform) string { return path.Join(platform.OS, platform.Architecture, platform.Variant) } +// FormatAll returns a string specifier that also includes the OSVersion from the +// provided platform specification. +func FormatAll(platform specs.Platform) string { + if platform.OS == "" { + return "unknown" + } + + if platform.OSVersion != "" { + OSAndVersion := fmt.Sprintf(osAndVersionFormat, platform.OS, platform.OSVersion) + return path.Join(OSAndVersion, platform.Architecture, platform.Variant) + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} + // Normalize validates and translate the platform to the canonical value. // // For example, if "Aarch64" is encountered, we change it to "arm64" or if diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/platforms/platforms_other.go similarity index 94% rename from vendor/github.com/containerd/containerd/platforms/platforms_other.go rename to vendor/github.com/containerd/platforms/platforms_other.go index 59beeb3d1..03f4dcd99 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms_other.go +++ b/vendor/github.com/containerd/platforms/platforms_other.go @@ -28,7 +28,3 @@ func newDefaultMatcher(platform specs.Platform) Matcher { Platform: Normalize(platform), } } - -func GetWindowsOsVersion() string { - return "" -} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/platforms/platforms_windows.go similarity index 85% rename from vendor/github.com/containerd/containerd/platforms/platforms_windows.go rename to vendor/github.com/containerd/platforms/platforms_windows.go index 733d18dde..950e2a2dd 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go +++ b/vendor/github.com/containerd/platforms/platforms_windows.go @@ -17,10 +17,7 @@ package platforms import ( - "fmt" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sys/windows" ) // NewMatcher returns a Windows matcher that will match on osVersionPrefix if @@ -35,8 +32,3 @@ func newDefaultMatcher(platform specs.Platform) Matcher { }, } } - -func GetWindowsOsVersion() string { - major, minor, build := windows.RtlGetNtVersionNumbers() - return fmt.Sprintf("%d.%d.%d", major, minor, build) -} diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md index e3d0742f4..8d86600a4 100644 --- a/vendor/github.com/containerd/typeurl/v2/README.md +++ b/vendor/github.com/containerd/typeurl/v2/README.md @@ -2,7 +2,7 @@ 
[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) [![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) +[![codecov](https://codecov.io/gh/containerd/typeurl/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) A Go package for managing the registration, marshaling, and unmarshaling of encoded types. @@ -13,8 +13,8 @@ This package helps when types are sent over a ttrpc/GRPC API and marshaled as a **typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go index 8d6665bb5..78817b701 100644 --- a/vendor/github.com/containerd/typeurl/v2/types.go +++ b/vendor/github.com/containerd/typeurl/v2/types.go @@ -27,6 +27,7 @@ import ( gogoproto "github.com/gogo/protobuf/proto" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/anypb" ) var ( @@ -122,6 +123,9 @@ func TypeURL(v interface{}) (string, error) { // Is returns true if the type of the Any is the same as v. func Is(any Any, v interface{}) bool { + if any == nil { + return false + } // call to check that v is a pointer tryDereference(v) url, err := TypeURL(v) @@ -193,6 +197,31 @@ func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { return err } +// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any. +func MarshalProto(from Any) *anypb.Any { + if from == nil { + return nil + } + + if pbany, ok := from.(*anypb.Any); ok { + return pbany + } + + return &anypb.Any{ + TypeUrl: from.GetTypeUrl(), + Value: from.GetValue(), + } +} + +// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any. 
+func MarshalAnyToProto(from interface{}) (*anypb.Any, error) { + anyType, err := MarshalAny(from) + if err != nil { + return nil, err + } + return MarshalProto(anyType), nil +} + func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { t, err := getTypeByUrl(typeURL) if err != nil { diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 29b62eada..32ecc0f79 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -27,12 +27,12 @@ env: #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_NAME: "fedora-39" - PRIOR_FEDORA_NAME: "fedora-38" + FEDORA_NAME: "fedora-41" + PRIOR_FEDORA_NAME: "fedora-40" DEBIAN_NAME: "debian-13" # Image identifiers - IMAGE_SUFFIX: "c20240708t152000z-f40f39d13" + IMAGE_SUFFIX: "c20241106t163000z-f41f40d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -208,10 +208,11 @@ integration_task: DISTRO_NV: "${FEDORA_NAME}" IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'vfs' - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'vfs' + # Disabled until we update to f40/41 as f39 does not have go 1.22 + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'vfs' - env: DISTRO_NV: "${DEBIAN_NAME}" IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}" @@ -221,10 +222,11 @@ integration_task: DISTRO_NV: "${FEDORA_NAME}" IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' + # Disabled until we update to f40/41 as f39 does not have go 1.22 + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'overlay' - env: DISTRO_NV: "${DEBIAN_NAME}" IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}" @@ -265,11 +267,12 @@ integration_rootless_task: IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' PRIV_NAME: rootless - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' - PRIV_NAME: rootless + # Disabled until we update to f40/41 as f39 does not have go 1.22 + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'overlay' + # PRIV_NAME: rootless - env: DISTRO_NV: "${DEBIAN_NAME}" IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}" diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml index af0b10c76..8690fb4ad 100644 --- a/vendor/github.com/containers/buildah/.golangci.yml +++ b/vendor/github.com/containers/buildah/.golangci.yml @@ -8,6 +8,9 @@ run: concurrency: 4 linters: enable: + - gofmt + - gofumpt - revive - unconvert - unparam + - whitespace diff --git a/vendor/github.com/containers/buildah/.packit.yaml b/vendor/github.com/containers/buildah/.packit.yaml index ab7681cbe..df576457c 100644 --- a/vendor/github.com/containers/buildah/.packit.yaml +++ b/vendor/github.com/containers/buildah/.packit.yaml @@ -62,14 +62,15 @@ jobs: - centos-stream-10-aarch64 enable_net: true - - job: copr_build - trigger: pull_request - packages: [buildah-rhel] - notifications: *copr_build_failure_notification - 
targets: - - epel-9-x86_64 - - epel-9-aarch64 - enable_net: true + # Disabled until there is go 1.22 in epel-9 + # - job: copr_build + # trigger: pull_request + # packages: [buildah-rhel] + # notifications: *copr_build_failure_notification + # targets: + # - epel-9-x86_64 + # - epel-9-aarch64 + # enable_net: true # Run on commit to main branch - job: copr_build diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index b2c180895..459442154 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,32 +2,126 @@ # Changelog -## v1.37.5 (2024-10-17) - - Bump the containers/storage library to v1.55.1 +## v1.38.0 (2024-11-08) + + Bump to c/common v0.61.0, c/image v5.33.0, c/storage v1.56.0 + fix(deps): update module golang.org/x/crypto to v0.29.0 + fix(deps): update module github.com/moby/buildkit to v0.17.1 + fix(deps): update module github.com/containers/storage to v1.56.0 + tests: skip two ulimit tests + CI VMs: bump f40 -> f41 + tests/tools: rebuild tools when we change versions + tests/tools: update golangci-lint to v1.61.0 + fix(deps): update module github.com/moby/buildkit to v0.17.0 + Handle RUN --mount with relative targets and no configured workdir + tests: bud: make parallel-safe + fix(deps): update module github.com/opencontainers/runc to v1.2.1 + fix(deps): update golang.org/x/exp digest to f66d83c + fix(deps): update github.com/opencontainers/runtime-tools digest to 6c9570a + tests: blobcache: use unique image name + tests: sbom: never write to cwd + tests: mkcw: bug fixes, refactor + deps: bump runc to v1.2.0 + deps: switch to moby/sys/userns + tests/test_runner.sh: remove some redundancies + Integration tests: run git daemon on a random-but-bind()able port + fix(deps): update module github.com/opencontainers/selinux to v1.11.1 + go.mod: remove unnecessary replace + Document more buildah build --secret options + Add support for COPY --exclude and ADD --exclude options + fix(deps): update github.com/containers/luksy digest to e2530d6 + chore(deps): update dependency containers/automation_images to v20241010 + fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.4 Properly validate cache IDs and sources - Packit: constrain koji job to fedora package to avoid dupes - -## v1.37.4 (2024-10-07) - - Fixed CVE-2024-9407 and CVE-2024-934 - -## v1.37.3 (2024-09-20) - + [skip-ci] Packit: constrain koji job to fedora package to avoid dupes + Audit and tidy OWNERS + fix(deps): update module golang.org/x/crypto to v0.28.0 + tests: add quotes to names + vendor: update c/common to latest + CVE-2024-9407: validate "bind-propagation" flag settings + vendor: switch to moby/sys/capability + Don't set ambient capabilities + Document that zstd:chunked is downgraded to zstd when encrypting + fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.3 + buildah-manifest-create.1: Fix manpage section + chore(deps): update dependency ubuntu to v24 + Make `buildah manifest push --all` true by default + chroot: add newlines at the end of printed error messages Do not error on trying to write IMA xattr as rootless + fix: remove duplicate conditions + fix(deps): update module github.com/moby/buildkit to v0.16.0 + fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.2 + Document how entrypoint is configured in buildah config + In a container, try to register binfmt_misc imagebuildah.StageExecutor: clean up volumes/volumeCache + build: 
fall back to parsing a TARGETPLATFORM build-arg `manifest add --artifact`: handle multiple values Packit: split out ELN jobs and reuse fedora downstream targets Packit: Enable sidetags for bodhi updates + fix(deps): update module github.com/docker/docker to v27.2.1+incompatible + tests/bud.bats: add git source + add: add support for git source + Add support for the new c/common pasta options + vendor latest c/common + fix(deps): update module golang.org/x/term to v0.24.0 + fix(deps): update module github.com/fsouza/go-dockerclient to v1.12.0 + packit: update fedora and epel targets + cirrus: disable f39 testing + cirrus: fix fedora names + update to go 1.22 + Vendor c/common:9d025e4cb348 + copier: handle globbing with "**" path components + fix(deps): update golang.org/x/exp digest to 9b4947d + fix(deps): update github.com/containers/luksy digest to 2e7307c + imagebuildah: make scratch config handling toggleable + fix(deps): update module github.com/docker/docker to v27.2.0+incompatible + Add a validation script for Makefile $(SOURCES) + fix(deps): update module github.com/openshift/imagebuilder to v1.2.15 + New VMs + Update some godocs, use 0o to prefix an octal in a comment + buildah-build.1.md: expand the --layer-label description + fix(deps): update module github.com/containers/common to v0.60.2 + run: fix a nil pointer dereference on FreeBSD + CI: enable the whitespace linter + Fix some govet linter warnings + Commit(): retry committing to local storage on storage.LayerUnknown + CI: enable the gofumpt linter + conformance: move weirdly-named files out of the repository + fix(deps): update module github.com/docker/docker to v27.1.2+incompatible + fix(deps): update module github.com/containers/common to v0.60.1 + *: use gofmt -s, add gofmt linter + *: fix build tags + fix(deps): update module github.com/containers/image/v5 to v5.32.1 + Add(): re-escape any globbed items that included escapes + conformance tests: use mirror.gcr.io for most images + unit tests: use test-specific policy.json and registries.conf + fix(deps): update module golang.org/x/sys to v0.24.0 + Update to spun-out "github.com/containerd/platforms" + Bump github.com/containerd/containerd + test/tools/Makefile: duplicate the vendor-in-container target + linters: unchecked error + linters: don't end loop iterations with "else" when "then" would + linters: unused arguments shouldn't have names + linters: rename checkIdsGreaterThan5() to checkIDsGreaterThan5() + linters: don't name variables "cap" + `make lint`: use --timeout instead of --deadline + Drop the e2e test suite + fix(deps): update module golang.org/x/crypto to v0.26.0 + fix(deps): update module github.com/onsi/gomega to v1.34.1 + `make vendor-in-container`: use the caller's Go cache if it exists + fix(deps): fix test/tools ginkgo typo + fix(deps): update module github.com/onsi/ginkgo/v2 to v2.19.1 + Update to keep up with API changes in storage + fix(deps): update github.com/containers/luksy digest to 1f482a9 + install: On Debian/Ubuntu, add installation of libbtrfs-dev + fix(deps): update module golang.org/x/sys to v0.23.0 + fix(deps): update golang.org/x/exp digest to 8a7402a + fix(deps): update module github.com/fsouza/go-dockerclient to v1.11.2 Use Epoch: 2 and respect the epoch in dependencies. 
- -## v1.37.2 (2024-08-20) - - [release-1.37] Bump c/common to v0.60.2, c/image to v5.32.2 - -## v1.37.1 (2024-08-12) - - [release-1.37] Bump c/common v0.60.1, c/image v5.32.1 + Bump to Buildah v1.38.0-dev + AddAndCopyOptions: add CertPath, InsecureSkipTLSVerify, Retry fields + Add PrependedLinkedLayers/AppendedLinkedLayers to CommitOptions + integration tests: teach starthttpd() about TLS and pid files ## vv1.37.0 (2024-07-26) diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 61ec458ea..37dbeb43e 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -41,7 +41,7 @@ LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/jail/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go pkg/volumes/*.go util/*.go +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/binfmt/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/jail/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go pkg/volumes/*.go util/*.go LINTFLAGS ?= @@ -53,7 +53,7 @@ endif # Note: Uses the -N -l go compiler options to disable compiler optimizations # and inlining. Using these build options allows you to subsequently # use source debugging tools like delve. -all: bin/buildah bin/imgtype bin/copy bin/tutorial docs +all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial docs # Update nix/nixpkgs.json its latest stable commit .PHONY: nixpkgs @@ -110,6 +110,9 @@ bin/copy: $(SOURCES) tests/copy/copy.go bin/tutorial: $(SOURCES) tests/tutorial/tutorial.go $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/tutorial/tutorial.go +bin/inet: tests/inet/inet.go + $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/inet/inet.go + .PHONY: clean clean: $(RM) -r bin tests/testreport/testreport @@ -133,6 +136,7 @@ validate: install.tools ./tests/validate/whitespace.sh ./hack/xref-helpmsgs-manpages ./tests/validate/pr-should-include-tests + ./hack/makefile_sources .PHONY: install.tools install.tools: @@ -184,7 +188,6 @@ test-conformance: .PHONY: test-integration test-integration: install.tools - ./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/. 
cd tests; ./test_runner.sh tests/testreport/testreport: tests/testreport/testreport.go @@ -199,7 +202,11 @@ test-unit: tests/testreport/testreport $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf vendor-in-container: - podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor + if test -d `go env GOCACHE` && test -w `go env GOCACHE` ; then \ + podman run --privileged --rm --env HOME=/root -v `go env GOCACHE`:/root/.cache/go-build --env GOCACHE=/root/.cache/go-build -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor ; \ + else \ + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor ; \ + fi .PHONY: vendor vendor: diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS index 2eb9ea84c..a34c6b256 100644 --- a/vendor/github.com/containers/buildah/OWNERS +++ b/vendor/github.com/containers/buildah/OWNERS @@ -1,28 +1,15 @@ approvers: - TomSweeneyRedHat - - ashley-cui - - cevich - - flouthoc - - giuseppe - lsm5 - nalind - rhatdan - umohnani8 - - vrothberg reviewers: - - QiWang19 - - TomSweeneyRedHat - ashley-cui - baude - - cevich - edsantiago + - flouthoc - giuseppe - - haircommander - - jwhonce - - lsm5 + - Honny1 - mheon - - mrunalp - - nalind - - rhatdan - - umohnani8 - vrothberg diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index 4da572c0d..2e884ad89 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -2,6 +2,8 @@ package buildah import ( "archive/tar" + "context" + "crypto/tls" "errors" "fmt" "io" @@ -18,10 +20,17 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal/tmpdir" "github.com/containers/buildah/pkg/chrootuser" + "github.com/containers/common/pkg/retry" + "github.com/containers/image/v5/pkg/tlsclientconfig" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/regexp" + "github.com/docker/go-connections/tlsconfig" "github.com/hashicorp/go-multierror" + "github.com/moby/sys/userns" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" @@ -29,7 +38,7 @@ import ( // AddAndCopyOptions holds options for add and copy commands. type AddAndCopyOptions struct { - //Chmod sets the access permissions of the destination content. + // Chmod sets the access permissions of the destination content. Chmod string // Chown is a spec for the user who should be given ownership over the // newly-added content, potentially overriding permissions which would @@ -72,20 +81,61 @@ type AddAndCopyOptions struct { // Clear the sticky bit on items being copied. Has no effect on // archives being extracted, where the bit is always preserved. StripStickyBit bool + // If not "", a directory containing a CA certificate (ending with + // ".crt"), a client certificate (ending with ".cert") and a client + // certificate key (ending with ".key") used when downloading sources + // from locations protected with TLS. 
+ CertPath string + // Allow downloading sources from HTTPS where TLS verification fails. + InsecureSkipTLSVerify types.OptionalBool + // MaxRetries is the maximum number of attempts we'll make to retrieve + // contents from a remote location. + MaxRetries int + // RetryDelay is how long to wait before retrying attempts to retrieve + // remote contents. + RetryDelay time.Duration } -// sourceIsRemote returns true if "source" is a remote location. +// gitURLFragmentSuffix matches fragments to use as Git reference and build +// context from the Git repository e.g. +// +// github.com/containers/buildah.git +// github.com/containers/buildah.git#main +// github.com/containers/buildah.git#v1.35.0 +var gitURLFragmentSuffix = regexp.Delayed(`\.git(?:#.+)?$`) + +// sourceIsGit returns true if "source" is a git location. +func sourceIsGit(source string) bool { + return isURL(source) && gitURLFragmentSuffix.MatchString(source) +} + +func isURL(url string) bool { + return strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") +} + +// sourceIsRemote returns true if "source" is a remote location +// and *not* a git repo. Certain github urls such as raw.github.* are allowed. func sourceIsRemote(source string) bool { - return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") + return isURL(source) && !gitURLFragmentSuffix.MatchString(source) } // getURL writes a tar archive containing the named content -func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest) error { +func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool) error { url, err := url.Parse(src) if err != nil { return err } - response, err := http.Get(src) + tlsClientConfig := &tls.Config{ + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + } + if err := tlsclientconfig.SetupCertificates(certPath, tlsClientConfig); err != nil { + return err + } + tlsClientConfig.InsecureSkipVerify = insecureSkipTLSVerify == types.OptionalBoolTrue + + tr := &http.Transport{TLSClientConfig: tlsClientConfig} + httpClient := &http.Client{Transport: tr} + response, err := httpClient.Get(src) if err != nil { return err } @@ -147,7 +197,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, uid = chown.UID gid = chown.GID } - var mode int64 = 0600 + var mode int64 = 0o600 if chmod != nil { mode = int64(*chmod) } @@ -201,6 +251,18 @@ func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool { return false } +// globbedToGlobbable takes a pathname which might include the '[', *, or ? +// characters, and converts it into a glob pattern that matches itself by +// marking the '[' characters as _not_ the beginning of match ranges and +// escaping the * and ? characters. +func globbedToGlobbable(glob string) string { + result := glob + result = strings.ReplaceAll(result, "[", "[[]") + result = strings.ReplaceAll(result, "?", "\\?") + result = strings.ReplaceAll(result, "*", "\\*") + return result +} + // Add copies the contents of the specified sources into the container's root // filesystem, optionally extracting contents of local files that look like // non-empty archives. @@ -233,18 +295,31 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } // Figure out what sorts of sources we have. 
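The AddAndCopyOptions hunk above adds TLS and retry controls that getURL now honors. A minimal usage sketch, assuming only the vendored API shown in this diff; the builder value, certificate directory, and URL are hypothetical:

package example

import (
    "time"

    "github.com/containers/buildah"
    "github.com/containers/image/v5/types"
)

// addRemoteSource sketches the new knobs: CertPath points at a directory of
// *.crt/*.cert/*.key files, InsecureSkipTLSVerify loosens verification, and
// MaxRetries/RetryDelay drive the retry loop around the download.
func addRemoteSource(b *buildah.Builder) error {
    opts := buildah.AddAndCopyOptions{
        CertPath:              "/etc/containers/certs.d/example.com", // hypothetical path
        InsecureSkipTLSVerify: types.OptionalBoolFalse,
        MaxRetries:            3,
        RetryDelay:            2 * time.Second,
    }
    // Signature per this vendored add.go: destination, extract flag,
    // options, then one or more sources.
    return b.Add("/opt/app/", false, opts, "https://example.com/app.tar")
}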
- var localSources, remoteSources []string + var localSources, remoteSources, gitSources []string for i, src := range sources { + if src == "" { + return errors.New("empty source location") + } if sourceIsRemote(src) { remoteSources = append(remoteSources, src) continue } + if sourceIsGit(src) { + gitSources = append(gitSources, src) + continue + } if !filepath.IsAbs(src) && options.ContextDir == "" { sources[i] = filepath.Join(currentDir, src) } localSources = append(localSources, sources[i]) } + // Treat git sources as a subset of remote sources + // differentiating only in how we fetch the two later on. + if len(gitSources) > 0 { + remoteSources = append(remoteSources, gitSources...) + } + // Check how many items our local source specs matched. Each spec // should have matched at least one item, otherwise we consider it an // error. @@ -276,7 +351,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } numLocalSourceItems += len(localSourceStat.Globbed) } - if numLocalSourceItems+len(remoteSources) == 0 { + if numLocalSourceItems+len(remoteSources)+len(gitSources) == 0 { return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT) } @@ -333,6 +408,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption destCanBeFile = true } } + if len(gitSources) > 0 { + destMustBeDirectory = true + } } // We care if the destination either doesn't exist, or exists and is a @@ -404,7 +482,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption var multiErr *multierror.Error var getErr, closeErr, renameErr, putErr error var wg sync.WaitGroup - if sourceIsRemote(src) { + if sourceIsRemote(src) || sourceIsGit(src) { pipeReader, pipeWriter := io.Pipe() var srcDigest digest.Digest if options.Checksum != "" { @@ -413,12 +491,43 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption return fmt.Errorf("invalid checksum flag: %w", err) } } + wg.Add(1) - go func() { - getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest) - pipeWriter.Close() - wg.Done() - }() + if sourceIsGit(src) { + go func() { + var cloneDir string + cloneDir, _, getErr = define.TempDirForURL(tmpdir.GetTempDir(), "", src) + getOptions := copier.GetOptions{ + UIDMap: srcUIDMap, + GIDMap: srcGIDMap, + Excludes: options.Excludes, + ExpandArchives: extract, + ChownDirs: chownDirs, + ChmodDirs: chmodDirsFiles, + ChownFiles: chownFiles, + ChmodFiles: chmodDirsFiles, + StripSetuidBit: options.StripSetuidBit, + StripSetgidBit: options.StripSetgidBit, + StripStickyBit: options.StripStickyBit, + } + writer := io.WriteCloser(pipeWriter) + getErr = copier.Get(cloneDir, cloneDir, getOptions, []string{"."}, writer) + pipeWriter.Close() + wg.Done() + }() + } else { + go func() { + getErr = retry.IfNecessary(context.TODO(), func() error { + return getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest, options.CertPath, options.InsecureSkipTLSVerify) + }, &retry.Options{ + MaxRetry: options.MaxRetries, + Delay: options.RetryDelay, + }) + pipeWriter.Close() + wg.Done() + }() + } + wg.Add(1) go func() { b.ContentDigester.Start("") @@ -437,7 +546,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodDirs: nil, ChownFiles: nil, ChmodFiles: nil, - IgnoreDevices: runningInUserNS(), + IgnoreDevices: userns.RunningInUserNS(), } putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) 
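The download above is now driven through c/common's retry helper rather than a bare http.Get. A self-contained sketch of that pattern, with fetchOnce standing in for the single-attempt operation:

package example

import (
    "context"
    "errors"
    "time"

    "github.com/containers/common/pkg/retry"
)

// fetchOnce is a hypothetical single-attempt download.
func fetchOnce() error { return errors.New("transient failure") }

// fetchWithRetry mirrors how Add() wraps getURL: retry.IfNecessary re-invokes
// the callback up to MaxRetry times, sleeping Delay between attempts.
func fetchWithRetry() error {
    return retry.IfNecessary(context.TODO(), fetchOnce, &retry.Options{
        MaxRetry: 3,
        Delay:    time.Second,
    })
}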
} @@ -480,27 +589,27 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption // Iterate through every item that matched the glob. itemsCopied := 0 - for _, glob := range localSourceStat.Globbed { - rel := glob - if filepath.IsAbs(glob) { - if rel, err = filepath.Rel(contextDir, glob); err != nil { - return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err) + for _, globbed := range localSourceStat.Globbed { + rel := globbed + if filepath.IsAbs(globbed) { + if rel, err = filepath.Rel(contextDir, globbed); err != nil { + return fmt.Errorf("computing path of %q relative to %q: %w", globbed, contextDir, err) } } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir) + return fmt.Errorf("possible escaping context directory error: %q is outside of %q", globbed, contextDir) } // Check for dockerignore-style exclusion of this item. if rel != "." { excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck if err != nil { - return fmt.Errorf("checking if %q(%q) is excluded: %w", glob, rel, err) + return fmt.Errorf("checking if %q(%q) is excluded: %w", globbed, rel, err) } if excluded { // non-directories that are excluded are excluded, no question, but // directories can only be skipped if we don't have to allow for the // possibility of finding things to include under them - globInfo := localSourceStat.Results[glob] + globInfo := localSourceStat.Results[globbed] if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) { continue } @@ -517,7 +626,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption // due to potentially not having anything in the tarstream that we passed. 
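globbedToGlobbable, added in the add.go hunk earlier, is what makes it safe to feed the renamed `globbed` values back into copier.Get in the hunk below. A standalone sketch of the escaping it performs, under the assumption that only '[', '?', and '*' need neutralizing:

package example

import "strings"

// selfMatchingGlob reproduces the idea behind globbedToGlobbable: neutralize
// '[' first (so it cannot open a character range), then escape '?' and '*',
// leaving a pattern that matches only its literal input.
func selfMatchingGlob(name string) string {
    s := strings.ReplaceAll(name, "[", "[[]")
    s = strings.ReplaceAll(s, "?", `\?`)
    return strings.ReplaceAll(s, "*", `\*`)
}

// selfMatchingGlob("file[1].txt") == "file[[]1].txt"
// selfMatchingGlob("a*b?c")       == `a\*b\?c`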
itemsCopied++ } - st := localSourceStat.Results[glob] + st := localSourceStat.Results[globbed] pipeReader, pipeWriter := io.Pipe() wg.Add(1) go func() { @@ -530,7 +639,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption return false, false, nil }) } - writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) { + writer = newTarFilterer(writer, func(_ *tar.Header) (bool, bool, io.Reader) { itemsCopied++ return false, false, nil }) @@ -547,7 +656,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption StripSetgidBit: options.StripSetgidBit, StripStickyBit: options.StripStickyBit, } - getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer) + getErr = copier.Get(contextDir, contextDir, getOptions, []string{globbedToGlobbable(globbed)}, writer) closeErr = writer.Close() if renameTarget != "" && renamedItems > 1 { renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems) @@ -578,7 +687,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodDirs: nil, ChownFiles: nil, ChmodFiles: nil, - IgnoreDevices: runningInUserNS(), + IgnoreDevices: userns.RunningInUserNS(), } putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) } @@ -640,7 +749,6 @@ func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, st } else { u.AdditionalGids = groups } - } return u, homeDir, err } diff --git a/vendor/github.com/containers/buildah/add_common.go b/vendor/github.com/containers/buildah/add_common.go deleted file mode 100644 index b1eef2c19..000000000 --- a/vendor/github.com/containers/buildah/add_common.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !linux -// +build !linux - -package buildah - -func runningInUserNS() bool { - return false -} diff --git a/vendor/github.com/containers/buildah/add_linux.go b/vendor/github.com/containers/buildah/add_linux.go deleted file mode 100644 index 78b742496..000000000 --- a/vendor/github.com/containers/buildah/add_linux.go +++ /dev/null @@ -1,9 +0,0 @@ -package buildah - -import ( - "github.com/opencontainers/runc/libcontainer/userns" -) - -func runningInUserNS() bool { - return userns.RunningInUserNS() -} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go index 0524eca83..89c02fb0b 100644 --- a/vendor/github.com/containers/buildah/bind/mount.go +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package bind @@ -49,7 +48,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou if err != nil { return nil, fmt.Errorf("checking permissions on %q: %w", bundlePath, err) } - if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil { + if err = os.Chmod(bundlePath, info.Mode()|0o111); err != nil { return nil, fmt.Errorf("loosening permissions on %q: %w", bundlePath, err) } @@ -116,7 +115,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // other unprivileged users outside of containers, shouldn't be able to // access. 
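With add_common.go and add_linux.go deleted above, buildah's per-platform runningInUserNS() shims are gone and call sites use github.com/moby/sys/userns directly. A trivial, self-contained illustration of the replacement call:

package main

import (
    "fmt"

    "github.com/moby/sys/userns"
)

func main() {
    // One cross-platform call replaces the old build-tagged wrappers; on
    // platforms without user namespaces it simply reports false.
    fmt.Println("running in a user namespace:", userns.RunningInUserNS())
}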
mnt := filepath.Join(bundlePath, "mnt") - if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil { + if err = idtools.MkdirAndChown(mnt, 0o100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil { return unmountAll, fmt.Errorf("creating %q owned by the container's root user: %w", mnt, err) } @@ -129,7 +128,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // Create a bind mount for the root filesystem and add it to the list. rootfs := filepath.Join(mnt, "rootfs") - if err = os.Mkdir(rootfs, 0000); err != nil { + if err = os.Mkdir(rootfs, 0o000); err != nil { return unmountAll, fmt.Errorf("creating directory %q: %w", rootfs, err) } if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { @@ -160,13 +159,13 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou if info.IsDir() { // If the source is a directory, make one to use as the // mount target. - if err = os.Mkdir(stage, 0000); err != nil { + if err = os.Mkdir(stage, 0o000); err != nil { return unmountAll, fmt.Errorf("creating directory %q: %w", stage, err) } } else { // If the source is not a directory, create an empty // file to use as the mount target. - file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000) + file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0o000) if err != nil { return unmountAll, fmt.Errorf("creating file %q: %w", stage, err) } diff --git a/vendor/github.com/containers/buildah/bind/mount_unsupported.go b/vendor/github.com/containers/buildah/bind/mount_unsupported.go index 88ca2ca8b..11cee0a47 100644 --- a/vendor/github.com/containers/buildah/bind/mount_unsupported.go +++ b/vendor/github.com/containers/buildah/bind/mount_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux +//go:build !linux package bind diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 1aa6e173d..dcadc46f2 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -191,6 +191,13 @@ type Builder struct { // CDIConfigDir is the location of CDI configuration files, if the files in // the default configuration locations shouldn't be used. CDIConfigDir string + // PrependedLinkedLayers and AppendedLinkedLayers are combinations of + // history entries and locations of either directory trees (if + // directories, per os.Stat()) or uncompressed layer blobs which should + // be added to the image at commit-time. The order of these relative + // to PrependedEmptyLayers and AppendedEmptyLayers in the committed + // image is not guaranteed. + PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer } // BuilderInfo are used as objects to display container information @@ -370,6 +377,11 @@ type BuilderOptions struct { // CDIConfigDir is the location of CDI configuration files, if the files in // the default configuration locations shouldn't be used. CDIConfigDir string + // CompatScratchConfig controls whether a "scratch" image is created + // with a truly empty configuration, as would have happened in the past + // (when set to true), or with a minimal initial configuration which + // has a working directory set in it. 
+ CompatScratchConfig types.OptionalBool } // ImportOptions are used to initialize a Builder from an existing container @@ -563,7 +575,7 @@ func (b *Builder) Save() error { if err != nil { return err } - if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil { + if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0o600); err != nil { return fmt.Errorf("saving builder state to %q: %w", filepath.Join(cdir, stateFile), err) } return nil diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 4fe7dbf1f..2c41aea4e 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,24 +1,122 @@ -- Changelog for v1.37.5 (2024-10-17) - * Bump the containers/storage library to v1.55.1 +- Changelog for v1.38.0 (2024-11-08) + * Bump to c/common v0.61.0, c/image v5.33.0, c/storage v1.56.0 + * fix(deps): update module golang.org/x/crypto to v0.29.0 + * fix(deps): update module github.com/moby/buildkit to v0.17.1 + * fix(deps): update module github.com/containers/storage to v1.56.0 + * tests: skip two ulimit tests + * CI VMs: bump f40 -> f41 + * tests/tools: rebuild tools when we change versions + * tests/tools: update golangci-lint to v1.61.0 + * fix(deps): update module github.com/moby/buildkit to v0.17.0 + * Handle RUN --mount with relative targets and no configured workdir + * tests: bud: make parallel-safe + * fix(deps): update module github.com/opencontainers/runc to v1.2.1 + * fix(deps): update golang.org/x/exp digest to f66d83c + * fix(deps): update github.com/opencontainers/runtime-tools digest to 6c9570a + * tests: blobcache: use unique image name + * tests: sbom: never write to cwd + * tests: mkcw: bug fixes, refactor + * deps: bump runc to v1.2.0 + * deps: switch to moby/sys/userns + * tests/test_runner.sh: remove some redundancies + * Integration tests: run git daemon on a random-but-bind()able port + * fix(deps): update module github.com/opencontainers/selinux to v1.11.1 + * go.mod: remove unnecessary replace + * Document more buildah build --secret options + * Add support for COPY --exclude and ADD --exclude options + * fix(deps): update github.com/containers/luksy digest to e2530d6 + * chore(deps): update dependency containers/automation_images to v20241010 + * fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.4 * Properly validate cache IDs and sources - * Packit: constrain koji job to fedora package to avoid dupes - -- Changelog for v1.37.4 (2024-10-07) - * Fixed CVE-2024-9407 and CVE-2024-9341 - -- Changelog for v1.37.3 (2024-09-20) + * [skip-ci] Packit: constrain koji job to fedora package to avoid dupes + * Audit and tidy OWNERS + * fix(deps): update module golang.org/x/crypto to v0.28.0 + * tests: add quotes to names + * vendor: update c/common to latest + * CVE-2024-9407: validate "bind-propagation" flag settings + * vendor: switch to moby/sys/capability + * Don't set ambient capabilities + * Document that zstd:chunked is downgraded to zstd when encrypting + * fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.3 + * buildah-manifest-create.1: Fix manpage section + * chore(deps): update dependency ubuntu to v24 + * Make `buildah manifest push --all` true by default + * chroot: add newlines at the end of printed error messages * Do not error on trying to write IMA xattr as rootless + * fix: remove duplicate conditions + * fix(deps): update module
github.com/moby/buildkit to v0.16.0 + * fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.2 + * Document how entrypoint is configured in buildah config + * In a container, try to register binfmt_misc * imagebuildah.StageExecutor: clean up volumes/volumeCache + * build: fall back to parsing a TARGETPLATFORM build-arg * `manifest add --artifact`: handle multiple values * Packit: split out ELN jobs and reuse fedora downstream targets * Packit: Enable sidetags for bodhi updates + * fix(deps): update module github.com/docker/docker to v27.2.1+incompatible + * tests/bud.bats: add git source + * add: add support for git source + * Add support for the new c/common pasta options + * vendor latest c/common + * fix(deps): update module golang.org/x/term to v0.24.0 + * fix(deps): update module github.com/fsouza/go-dockerclient to v1.12.0 + * packit: update fedora and epel targets + * cirrus: disable f39 testing + * cirrus: fix fedora names + * update to go 1.22 + * Vendor c/common:9d025e4cb348 + * copier: handle globbing with "**" path components + * fix(deps): update golang.org/x/exp digest to 9b4947d + * fix(deps): update github.com/containers/luksy digest to 2e7307c + * imagebuildah: make scratch config handling toggleable + * fix(deps): update module github.com/docker/docker to v27.2.0+incompatible + * Add a validation script for Makefile $(SOURCES) + * fix(deps): update module github.com/openshift/imagebuilder to v1.2.15 + * New VMs + * Update some godocs, use 0o to prefix an octal in a comment + * buildah-build.1.md: expand the --layer-label description + * fix(deps): update module github.com/containers/common to v0.60.2 + * run: fix a nil pointer dereference on FreeBSD + * CI: enable the whitespace linter + * Fix some govet linter warnings + * Commit(): retry committing to local storage on storage.LayerUnknown + * CI: enable the gofumpt linter + * conformance: move weirdly-named files out of the repository + * fix(deps): update module github.com/docker/docker to v27.1.2+incompatible + * fix(deps): update module github.com/containers/common to v0.60.1 + * *: use gofmt -s, add gofmt linter + * *: fix build tags + * fix(deps): update module github.com/containers/image/v5 to v5.32.1 + * Add(): re-escape any globbed items that included escapes + * conformance tests: use mirror.gcr.io for most images + * unit tests: use test-specific policy.json and registries.conf + * fix(deps): update module golang.org/x/sys to v0.24.0 + * Update to spun-out "github.com/containerd/platforms" + * Bump github.com/containerd/containerd + * test/tools/Makefile: duplicate the vendor-in-container target + * linters: unchecked error + * linters: don't end loop iterations with "else" when "then" would + * linters: unused arguments shouldn't have names + * linters: rename checkIdsGreaterThan5() to checkIDsGreaterThan5() + * linters: don't name variables "cap" + * `make lint`: use --timeout instead of --deadline + * Drop the e2e test suite + * fix(deps): update module golang.org/x/crypto to v0.26.0 + * fix(deps): update module github.com/onsi/gomega to v1.34.1 + * `make vendor-in-container`: use the caller's Go cache if it exists + * fix(deps): fix test/tools ginkgo typo + * fix(deps): update module github.com/onsi/ginkgo/v2 to v2.19.1 + * Update to keep up with API changes in storage + * fix(deps): update github.com/containers/luksy digest to 1f482a9 + * install: On Debian/Ubuntu, add installation of libbtrfs-dev + * fix(deps): update module golang.org/x/sys to v0.23.0 + * fix(deps): update 
golang.org/x/exp digest to 8a7402a + * fix(deps): update module github.com/fsouza/go-dockerclient to v1.11.2 * Use Epoch: 2 and respect the epoch in dependencies. - -- Changelog for v1.37.2 (2024-08-20) - * [release-1.37] Bump c/common to v0.60.2, c/image to v5.32.2 - -- Changelog for v1.37.1 (2024-08-12) - * [release-1.37] Bump c/common v0.60.1, c/image v5.32.1 + * Bump to Buildah v1.38.0-dev + * AddAndCopyOptions: add CertPath, InsecureSkipTLSVerify, Retry fields + * Add PrependedLinkedLayers/AppendedLinkedLayers to CommitOptions + * integration tests: teach starthttpd() about TLS and pid files - Changelog for vv1.37.0 (2024-07-26) * Bump c/storage, c/image, c/common for v1.37.0 diff --git a/vendor/github.com/containers/buildah/chroot/pty_posix.go b/vendor/github.com/containers/buildah/chroot/pty_posix.go index 97c1b6589..827ed9747 100644 --- a/vendor/github.com/containers/buildah/chroot/pty_posix.go +++ b/vendor/github.com/containers/buildah/chroot/pty_posix.go @@ -1,5 +1,4 @@ //go:build freebsd && cgo -// +build freebsd,cgo package chroot diff --git a/vendor/github.com/containers/buildah/chroot/pty_ptmx.go b/vendor/github.com/containers/buildah/chroot/pty_ptmx.go index b1ba96bc9..53b35760b 100644 --- a/vendor/github.com/containers/buildah/chroot/pty_ptmx.go +++ b/vendor/github.com/containers/buildah/chroot/pty_ptmx.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package chroot @@ -16,7 +15,7 @@ import ( // this instead of posix_openpt is that it avoids cgo. func getPtyDescriptors() (int, int, error) { // Create a pseudo-terminal -- open a copy of the master side. - controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600) + controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0o600) if err != nil { return -1, -1, fmt.Errorf("opening PTY master using /dev/ptmx: %v", err) } @@ -37,7 +36,7 @@ func getPtyDescriptors() (int, int, error) { return -1, -1, fmt.Errorf("getting PTY number: %v", err) } ptyName := fmt.Sprintf("/dev/pts/%d", ptyN) - fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620) + fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0o620) if err != nil { return -1, -1, fmt.Errorf("opening PTY %q: %v", ptyName, err) } diff --git a/vendor/github.com/containers/buildah/chroot/pty_unsupported.go b/vendor/github.com/containers/buildah/chroot/pty_unsupported.go index 55ea597e3..5b6a67d58 100644 --- a/vendor/github.com/containers/buildah/chroot/pty_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/pty_unsupported.go @@ -1,6 +1,4 @@ //go:build !linux && !(freebsd && cgo) -// +build !linux -// +build !freebsd !cgo package chroot diff --git a/vendor/github.com/containers/buildah/chroot/run_common.go b/vendor/github.com/containers/buildah/chroot/run_common.go index 4ffe1382d..be2a571ce 100644 --- a/vendor/github.com/containers/buildah/chroot/run_common.go +++ b/vendor/github.com/containers/buildah/chroot/run_common.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package chroot @@ -74,7 +73,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade if err != nil { return err } - if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0o600); err != nil { return fmt.Errorf("storing runtime configuration: %w", err) } logrus.Debugf("config = %v", string(specbytes)) @@ -266,7 +265,7 @@ func runUsingChrootMain() { logrus.Warnf("error %s ownership of container PTY %sto 
%d/%d: %v", op, from, rootUID, rootGID, err) } // Set permissions on the PTY. - if err = ctty.Chmod(0620); err != nil { + if err = ctty.Chmod(0o620); err != nil { logrus.Errorf("error setting permissions of container PTY: %v", err) os.Exit(1) } @@ -499,7 +498,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io BundlePath: bundlePath, }) if conferr != nil { - fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q", runUsingChrootExecCommand) + fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q\n", runUsingChrootExecCommand) os.Exit(1) } @@ -526,7 +525,6 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) if err := setPlatformUnshareOptions(spec, cmd); err != nil { return 1, fmt.Errorf("setting platform unshare options: %w", err) - } interrupted := make(chan os.Signal, 100) cmd.Hook = func(int) error { @@ -569,7 +567,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io } } } - fmt.Fprintf(os.Stderr, "process exited with error: %v", err) + fmt.Fprintf(os.Stderr, "process exited with error: %v\n", err) os.Exit(1) } @@ -697,7 +695,7 @@ func runUsingChrootExecMain() { } logrus.Debugf("setting supplemental groups") if err = syscall.Setgroups(gids); err != nil { - fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v", err) + fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v\n", err) os.Exit(1) } } else { @@ -705,7 +703,7 @@ func runUsingChrootExecMain() { if strings.Trim(string(setgroups), "\n") != "deny" { logrus.Debugf("clearing supplemental groups") if err = syscall.Setgroups([]int{}); err != nil { - fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v", err) + fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v\n", err) os.Exit(1) } } @@ -713,7 +711,7 @@ func runUsingChrootExecMain() { logrus.Debugf("setting gid") if err = unix.Setresgid(int(user.GID), int(user.GID), int(user.GID)); err != nil { - fmt.Fprintf(os.Stderr, "error setting GID: %v", err) + fmt.Fprintf(os.Stderr, "error setting GID: %v\n", err) os.Exit(1) } @@ -734,7 +732,7 @@ func runUsingChrootExecMain() { logrus.Debugf("setting uid") if err = unix.Setresuid(int(user.UID), int(user.UID), int(user.UID)); err != nil { - fmt.Fprintf(os.Stderr, "error setting UID: %v", err) + fmt.Fprintf(os.Stderr, "error setting UID: %v\n", err) os.Exit(1) } @@ -747,7 +745,7 @@ func runUsingChrootExecMain() { logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH")) interrupted := make(chan os.Signal, 100) if err = cmd.Start(); err != nil { - fmt.Fprintf(os.Stderr, "process failed to start with error: %v", err) + fmt.Fprintf(os.Stderr, "process failed to start with error: %v\n", err) } go func() { for range interrupted { @@ -774,7 +772,7 @@ func runUsingChrootExecMain() { } } } - fmt.Fprintf(os.Stderr, "process exited with error: %v", err) + fmt.Fprintf(os.Stderr, "process exited with error: %v\n", err) os.Exit(1) } } diff --git a/vendor/github.com/containers/buildah/chroot/run_freebsd.go b/vendor/github.com/containers/buildah/chroot/run_freebsd.go index 535491d6a..e32a4c9a4 100644 --- a/vendor/github.com/containers/buildah/chroot/run_freebsd.go +++ b/vendor/github.com/containers/buildah/chroot/run_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package chroot @@ -191,12 +190,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // XXX: This was copied 
from the linux version which supports bind mounting files. // Leaving it here since I plan to add this to FreeBSD's nullfs. if m.Type != "nullfs" || srcinfo.IsDir() { - if err = os.MkdirAll(target, 0111); err != nil { + if err = os.MkdirAll(target, 0o111); err != nil { return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err) } removes = append(removes, target) } else { - if err = os.MkdirAll(filepath.Dir(target), 0111); err != nil { + if err = os.MkdirAll(filepath.Dir(target), 0o111); err != nil { return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err) } // Don't do this until we can support file mounts in nullfs @@ -219,7 +218,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( save := saveDir(spec, target) if err := fileutils.Exists(save); err != nil { if errors.Is(err, fs.ErrNotExist) { - err = os.MkdirAll(save, 0111) + err = os.MkdirAll(save, 0o111) } if err != nil { return undoBinds, fmt.Errorf("creating file mount save directory %q: %w", save, err) diff --git a/vendor/github.com/containers/buildah/chroot/run_linux.go b/vendor/github.com/containers/buildah/chroot/run_linux.go index 9c5f95f18..6fe93e092 100644 --- a/vendor/github.com/containers/buildah/chroot/run_linux.go +++ b/vendor/github.com/containers/buildah/chroot/run_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package chroot @@ -16,10 +15,10 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/unshare" + "github.com/moby/sys/capability" "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "github.com/syndtr/gocapability/capability" "golang.org/x/sys/unix" ) @@ -179,39 +178,39 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error { capMap := map[capability.CapType][]string{ capability.BOUNDING: spec.Process.Capabilities.Bounding, capability.EFFECTIVE: spec.Process.Capabilities.Effective, - capability.INHERITABLE: []string{}, + capability.INHERITABLE: {}, capability.PERMITTED: spec.Process.Capabilities.Permitted, - capability.AMBIENT: spec.Process.Capabilities.Ambient, + capability.AMBIENT: {}, } - knownCaps := capability.List() + knownCaps := capability.ListKnown() noCap := capability.Cap(-1) for capType, capList := range capMap { - for _, capToSet := range capList { - cap := noCap + for _, capSpec := range capList { + capToSet := noCap for _, c := range knownCaps { - if strings.EqualFold("CAP_"+c.String(), capToSet) { - cap = c + if strings.EqualFold("CAP_"+c.String(), capSpec) { + capToSet = c break } } - if cap == noCap { - return fmt.Errorf("mapping capability %q to a number", capToSet) + if capToSet == noCap { + return fmt.Errorf("mapping capability %q to a number", capSpec) } - caps.Set(capType, cap) + caps.Set(capType, capToSet) } - for _, capToSet := range keepCaps { - cap := noCap + for _, capSpec := range keepCaps { + capToSet := noCap for _, c := range knownCaps { - if strings.EqualFold("CAP_"+c.String(), capToSet) { - cap = c + if strings.EqualFold("CAP_"+c.String(), capSpec) { + capToSet = c break } } - if cap == noCap { - return fmt.Errorf("mapping capability %q to a number", capToSet) + if capToSet == noCap { + return fmt.Errorf("mapping capability %q to a number", capSpec) } - if currentCaps.Get(capType, cap) { - caps.Set(capType, cap) + if currentCaps.Get(capType, capToSet) { + 
caps.Set(capType, capToSet) } } } @@ -225,7 +224,7 @@ func makeRlimit(limit specs.POSIXRlimit) unix.Rlimit { return unix.Rlimit{Cur: limit.Soft, Max: limit.Hard} } -func createPlatformContainer(options runUsingChrootExecSubprocOptions) error { +func createPlatformContainer(_ runUsingChrootExecSubprocOptions) error { return errors.New("unsupported createPlatformContainer") } @@ -302,7 +301,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( subDev := filepath.Join(spec.Root.Path, "/dev") if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil { if errors.Is(err, os.ErrNotExist) { - err = os.Mkdir(subDev, 0755) + err = os.Mkdir(subDev, 0o755) if err == nil { err = unix.Mount("/dev", subDev, "bind", devFlags, "") } @@ -326,7 +325,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( subProc := filepath.Join(spec.Root.Path, "/proc") if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil { if errors.Is(err, os.ErrNotExist) { - err = os.Mkdir(subProc, 0755) + err = os.Mkdir(subProc, 0o755) if err == nil { err = unix.Mount("/proc", subProc, "bind", procFlags, "") } @@ -341,7 +340,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( subSys := filepath.Join(spec.Root.Path, "/sys") if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil { if errors.Is(err, os.ErrNotExist) { - err = os.Mkdir(subSys, 0755) + err = os.Mkdir(subSys, 0o755) if err == nil { err = unix.Mount("/sys", subSys, "bind", sysFlags, "") } @@ -364,9 +363,9 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil { msg := fmt.Sprintf("could not bind mount %q, skipping: %v", m.Mountpoint, err) if strings.HasPrefix(m.Mountpoint, "/sys") { - logrus.Infof(msg) + logrus.Info(msg) } else { - logrus.Warningf(msg) + logrus.Warning(msg) } continue } @@ -433,15 +432,15 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // The target isn't there yet, so create it. If the source is a directory, // we need a directory, otherwise we need a non-directory (i.e., a file). if srcinfo.IsDir() { - if err = os.MkdirAll(target, 0755); err != nil { + if err = os.MkdirAll(target, 0o755); err != nil { return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err) } } else { - if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil { + if err = os.MkdirAll(filepath.Dir(target), 0o755); err != nil { return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err) } var file *os.File - if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil { + if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0o755); err != nil { return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err) } file.Close() @@ -594,7 +593,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Create an empty directory for to use for masking directories. 
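The setCapabilities hunk above moves from syndtr/gocapability to github.com/moby/sys/capability: List() becomes ListKnown(), ambient capabilities are now left empty, and the shadowing `cap` variable is renamed. A small sketch of the name-to-constant lookup it performs per capability type, assuming only the renamed List function differs between the two libraries:

package example

import (
    "strings"

    "github.com/moby/sys/capability"
)

// capByName resolves a "CAP_..." spec string to a capability constant, the
// same scan the vendored setCapabilities() runs for each capability type.
func capByName(spec string) (capability.Cap, bool) {
    for _, c := range capability.ListKnown() {
        if strings.EqualFold("CAP_"+c.String(), spec) {
            return c, true
        }
    }
    return capability.Cap(-1), false
}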
roEmptyDir := filepath.Join(bundlePath, "empty") if len(spec.Linux.MaskedPaths) > 0 { - if err := os.Mkdir(roEmptyDir, 0700); err != nil { + if err := os.Mkdir(roEmptyDir, 0o700); err != nil { return undoBinds, fmt.Errorf("creating empty directory %q: %w", roEmptyDir, err) } } diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go index e875f330c..25e218ecf 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -1,5 +1,4 @@ //go:build linux && seccomp -// +build linux,seccomp package chroot diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go b/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go index 90e9f14cb..134a5c4aa 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd && seccomp -// +build freebsd,seccomp package chroot diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go index dc80dcd1d..951b1b6d8 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -1,5 +1,4 @@ //go:build (!linux && !freebsd) || !seccomp -// +build !linux,!freebsd !seccomp package chroot diff --git a/vendor/github.com/containers/buildah/chroot/selinux.go b/vendor/github.com/containers/buildah/chroot/selinux.go index bba4b8254..c0203a36a 100644 --- a/vendor/github.com/containers/buildah/chroot/selinux.go +++ b/vendor/github.com/containers/buildah/chroot/selinux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package chroot diff --git a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go index 826b920e7..ae9ac8962 100644 --- a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package chroot diff --git a/vendor/github.com/containers/buildah/chroot/unsupported.go b/vendor/github.com/containers/buildah/chroot/unsupported.go index 677a0a2df..a2e7f45be 100644 --- a/vendor/github.com/containers/buildah/chroot/unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package chroot diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index e43f1b0a8..d8ee226e6 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -24,6 +24,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/stringid" digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "golang.org/x/exp/maps" ) @@ -120,10 +121,11 @@ type CommitOptions struct { // OverrideConfig is applied. OverrideChanges []string // ExtraImageContent is a map which describes additional content to add - // to the committed image. The map's keys are filesystem paths in the - // image and the corresponding values are the paths of files whose - // contents will be used in their place. 
The contents will be owned by - // 0:0 and have mode 0644. Currently only accepts regular files. + // to the new layer in the committed image. The map's keys are + // filesystem paths in the image and the corresponding values are the + // paths of files whose contents will be used in their place. The + // contents will be owned by 0:0 and have mode 0o644. Currently only + // accepts regular files. ExtraImageContent map[string]string // SBOMScanOptions encapsulates options which control whether or not we // run scanners on the rootfs that we're about to commit, and how. @@ -132,17 +134,32 @@ type CommitOptions struct { // the image in Docker format. Newer BuildKit-based builds don't set // this field. CompatSetParent types.OptionalBool + // PrependedLinkedLayers and AppendedLinkedLayers are combinations of + // history entries and locations of either directory trees (if + // directories, per os.Stat()) or uncompressed layer blobs which should + // be added to the image at commit-time. The order of these relative + // to PrependedEmptyLayers and AppendedEmptyLayers, and relative to the + // corresponding members in the Builder object, in the committed image + // is not guaranteed. + PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer } -var ( - // storageAllowedPolicyScopes overrides the policy for local storage - // to ensure that we can read images from it. - storageAllowedPolicyScopes = signature.PolicyTransportScopes{ - "": []signature.PolicyRequirement{ - signature.NewPRInsecureAcceptAnything(), - }, - } -) +// LinkedLayer combines a history entry with the location of either a directory +// tree (if it's a directory, per os.Stat()) or an uncompressed layer blob +// which should be added to the image at commit-time. The BlobPath and +// History.EmptyLayer fields should be considered mutually-exclusive. +type LinkedLayer struct { + History v1.History // history entry to add + BlobPath string // corresponding uncompressed blob file (layer as a tar archive), or directory tree to archive +} + +// storageAllowedPolicyScopes overrides the policy for local storage +// to ensure that we can read images from it. +var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, +} // checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment // variable, if it's set. 
The contents are expected to be a JSON-encoded @@ -348,6 +365,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options if options.ExtraImageContent == nil { options.ExtraImageContent = make(map[string]string, len(extraImageContent)) } + // merge in the scanner-generated content for k, v := range extraImageContent { if _, set := options.ExtraImageContent[k]; !set { options.ExtraImageContent[k] = v @@ -438,7 +456,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options logrus.Debugf("removing %v from assigned names to image %q", nameToRemove, img.ID) } if options.IIDFile != "" { - if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil { + if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0o644); err != nil { return imgID, nil, "", err } } @@ -487,7 +505,6 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options return imgID, nil, "", err } logrus.Debugf("added imgID %s to manifestID %s", imgID, manifestID) - } return imgID, ref, manifestDigest, nil } diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go index 165c49106..92a7cffff 100644 --- a/vendor/github.com/containers/buildah/common.go +++ b/vendor/github.com/containers/buildah/common.go @@ -2,6 +2,7 @@ package buildah import ( "context" + "errors" "io" "path/filepath" "time" @@ -11,6 +12,7 @@ import ( cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage" @@ -67,22 +69,31 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat return sc } -func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) { +func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, maybeWrappedDest, maybeWrappedSrc, directDest types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) { + return retryCopyImageWithOptions(ctx, policyContext, maybeWrappedDest, maybeWrappedSrc, directDest, copyOptions, maxRetries, retryDelay, true) +} + +func retryCopyImageWithOptions(ctx context.Context, policyContext *signature.PolicyContext, maybeWrappedDest, maybeWrappedSrc, directDest types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration, retryOnLayerUnknown bool) ([]byte, error) { var ( manifestBytes []byte err error - lastErr error ) - err = retry.RetryIfNecessary(ctx, func() error { - manifestBytes, err = cp.Image(ctx, policyContext, dest, src, copyOptions) - if registry != nil && registry.Transport().Name() != docker.Transport.Name() { - lastErr = err - return nil - } + err = retry.IfNecessary(ctx, func() error { + manifestBytes, err = cp.Image(ctx, policyContext, maybeWrappedDest, maybeWrappedSrc, copyOptions) return err - }, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay}) - if lastErr != nil { - err = lastErr - } + }, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay, IsErrorRetryable: func(err error) bool { + if retryOnLayerUnknown && directDest.Transport().Name() == is.Transport.Name() && errors.Is(err, storage.ErrLayerUnknown) { + // we were trying to reuse a 
layer that belonged to an + // image that was deleted at just the right (worst + // possible) time? yeah, try again + return true + } + if directDest.Transport().Name() != docker.Transport.Name() { + // if we're not talking to a registry, then nah + return false + } + // hand it off to the default should-this-be-retried logic + return retry.IsErrorRetryable(err) + }}) return manifestBytes, err } diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index 46120a46c..2501405ea 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -61,7 +61,7 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I return nil } -func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.SystemContext) error { +func (b *Builder) initConfig(ctx context.Context, sys *types.SystemContext, img types.Image, options *BuilderOptions) error { if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one. rawManifest, manifestMIMEType, err := img.Manifest(ctx) if err != nil { @@ -101,6 +101,21 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy } } } + } else { + if options == nil || options.CompatScratchConfig != types.OptionalBoolTrue { + b.Docker = docker.V2Image{ + V1Image: docker.V1Image{ + Config: &docker.Config{ + WorkingDir: "/", + }, + }, + } + b.OCIv1 = ociv1.Image{ + Config: ociv1.ImageConfig{ + WorkingDir: "/", + }, + } + } } b.setupLogger() @@ -753,3 +768,62 @@ func (b *Builder) AddAppendedEmptyLayer(created *time.Time, createdBy, author, c func (b *Builder) ClearAppendedEmptyLayers() { b.AppendedEmptyLayers = nil } + +// AddPrependedLinkedLayer adds an item to the history that we'll create when +// committing the image, optionally with a layer, after any history we inherit +// from the base image, but before the history item that we'll use to describe +// the new layer that we're adding. +// The blobPath can be either the location of an uncompressed archive, or a +// directory whose contents will be archived to use as a layer blob. Leaving +// blobPath empty is functionally similar to calling AddPrependedEmptyLayer(). +func (b *Builder) AddPrependedLinkedLayer(created *time.Time, createdBy, author, comment, blobPath string) { + if created != nil { + copiedTimestamp := *created + created = &copiedTimestamp + } + b.PrependedLinkedLayers = append(b.PrependedLinkedLayers, LinkedLayer{ + BlobPath: blobPath, + History: ociv1.History{ + Created: created, + CreatedBy: createdBy, + Author: author, + Comment: comment, + EmptyLayer: blobPath == "", + }, + }) +} + +// ClearPrependedLinkedLayers clears the list of history entries that we'll add +// the committed image before the layer that we're adding (if we're adding it). +func (b *Builder) ClearPrependedLinkedLayers() { + b.PrependedLinkedLayers = nil +} + +// AddAppendedLinkedLayer adds an item to the history that we'll create when +// committing the image, optionally with a layer, after the history item that +// we'll use to describe the new layer that we're adding. +// The blobPath can be either the location of an uncompressed archive, or a +// directory whose contents will be archived to use as a layer blob. Leaving +// blobPath empty is functionally similar to calling AddAppendedEmptyLayer(). 
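These Add*LinkedLayer helpers, together with the matching CommitOptions fields from commit.go, let callers attach prebuilt layers and history entries at commit time. A hedged sketch of what a caller might construct; the paths and history text are hypothetical, and per the LinkedLayer docs BlobPath may name either a directory tree or an uncompressed layer tar:

package example

import (
    "time"

    "github.com/containers/buildah"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// commitWithExtraLayer builds CommitOptions that append one out-of-band
// layer, with its history entry, after the layer being committed.
func commitWithExtraLayer() buildah.CommitOptions {
    now := time.Now()
    return buildah.CommitOptions{
        AppendedLinkedLayers: []buildah.LinkedLayer{{
            History: v1.History{
                Created:   &now,
                CreatedBy: "example: prefetched content", // hypothetical entry
                Comment:   "layer produced out-of-band",
            },
            BlobPath: "/var/tmp/extra-layer", // dir tree or uncompressed tar
        }},
    }
}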
+func (b *Builder) AddAppendedLinkedLayer(created *time.Time, createdBy, author, comment, blobPath string) { + if created != nil { + copiedTimestamp := *created + created = &copiedTimestamp + } + b.AppendedLinkedLayers = append(b.AppendedLinkedLayers, LinkedLayer{ + BlobPath: blobPath, + History: ociv1.History{ + Created: created, + CreatedBy: createdBy, + Author: author, + Comment: comment, + EmptyLayer: blobPath == "", + }, + }) +} + +// ClearAppendedLinkedLayers clears the list of linked layers that we'll add to +// the committed image after the layer that we're adding (if we're adding it). +func (b *Builder) ClearAppendedLinkedLayers() { + b.AppendedLinkedLayers = nil +} diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 0dfdca397..c742601ea 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -32,15 +32,85 @@ const ( copierCommand = "buildah-copier" maxLoopsFollowed = 64 // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar - cISUID = 04000 // Set uid, from archive/tar - cISGID = 02000 // Set gid, from archive/tar - cISVTX = 01000 // Save text (sticky bit), from archive/tar + cISUID = 0o4000 // Set uid, from archive/tar + cISGID = 0o2000 // Set gid, from archive/tar + cISVTX = 0o1000 // Save text (sticky bit), from archive/tar ) func init() { reexec.Register(copierCommand, copierMain) } +// extendedGlob calls filepath.Glob() on the passed-in patterns. If there is a +// "**" component in the pattern, filepath.Glob() will be called with the "**" +// replaced with all of the subdirectories under that point, and the results +// will be concatenated. +func extendedGlob(pattern string) (matches []string, err error) { + subdirs := func(dir string) []string { + var subdirectories []string + if err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return nil + } + if d.IsDir() { + if rel, err := filepath.Rel(dir, path); err == nil { + subdirectories = append(subdirectories, rel) + } + } + return nil + }); err != nil { + subdirectories = []string{"."} + } + return subdirectories + } + expandPatterns := func(pattern string) []string { + components := []string{} + dir := pattern + file := "" + for dir != "" && dir != string(os.PathSeparator) { + dir, file = filepath.Split(dir) + components = append([]string{file}, components...) + dir = strings.TrimSuffix(dir, string(os.PathSeparator)) + } + patterns := []string{string(os.PathSeparator)} + for i := range components { + var nextPatterns []string + if components[i] == "**" { + for _, parent := range patterns { + nextSubdirs := subdirs(parent) + for _, nextSubdir := range nextSubdirs { + nextPatterns = append(nextPatterns, filepath.Join(parent, nextSubdir)) + } + } + } else { + for _, parent := range patterns { + nextPattern := filepath.Join(parent, components[i]) + nextPatterns = append(nextPatterns, nextPattern) + } + } + patterns = []string{} + seen := map[string]struct{}{} + for _, nextPattern := range nextPatterns { + if _, seen := seen[nextPattern]; seen { + continue + } + patterns = append(patterns, nextPattern) + seen[nextPattern] = struct{}{} + } + } + return patterns + } + patterns := expandPatterns(pattern) + for _, pattern := range patterns { + theseMatches, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + matches = append(matches, theseMatches...) 
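The extendedGlob helper being added here backs the new "**" support used by copierHandlerStat and copierHandlerGet further down. A simplified, standalone take on the doublestar expansion, assuming a single "**" followed by a file pattern; the vendored version also de-duplicates the intermediate patterns it generates:

package example

import (
    "io/fs"
    "path/filepath"
)

// globStarStar approximates "root/**/tail": walk every subdirectory of root
// (including root itself) and run an ordinary filepath.Glob for tail there.
func globStarStar(root, tail string) ([]string, error) {
    var matches []string
    err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
        if walkErr != nil || !d.IsDir() {
            return nil // skip unreadable entries and non-directories
        }
        m, globErr := filepath.Glob(filepath.Join(path, tail))
        if globErr != nil {
            return globErr
        }
        matches = append(matches, m...)
        return nil
    })
    return matches, err
}

// globStarStar("/context/src", "*.go") roughly corresponds to the pattern
// "src/**/*.go" in the vendored implementation.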
+ } + return matches, nil +} + // isArchivePath returns true if the specified path can be read like a (possibly // compressed) tarball. func isArchivePath(path string) bool { @@ -196,24 +266,19 @@ type StatForItem struct { } // getResponse encodes a response for a single Get request. -type getResponse struct { -} +type getResponse struct{} // putResponse encodes a response for a single Put request. -type putResponse struct { -} +type putResponse struct{} // mkdirResponse encodes a response for a single Mkdir request. -type mkdirResponse struct { -} +type mkdirResponse struct{} // removeResponse encodes a response for a single Remove request. -type removeResponse struct { -} +type removeResponse struct{} // EvalOptions controls parts of Eval()'s behavior. -type EvalOptions struct { -} +type EvalOptions struct{} // Eval evaluates the directory's path, including any intermediate symbolic // links. @@ -222,7 +287,7 @@ type EvalOptions struct { // If the directory is specified as an absolute path, it should either be the // root directory or a subdirectory of the root directory. Otherwise, the // directory is treated as a path relative to the root directory. -func Eval(root string, directory string, options EvalOptions) (string, error) { +func Eval(root string, directory string, _ EvalOptions) (string, error) { req := request{ Request: requestEval, Root: root, @@ -1004,7 +1069,7 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response { Glob: req.preservedGlobs[i], } // glob this pattern - globMatched, err := filepath.Glob(glob) + globMatched, err := extendedGlob(glob) if err != nil { s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob) } @@ -1132,7 +1197,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa var queue []string globMatchedCount := 0 for _, glob := range req.Globs { - globMatched, err := filepath.Glob(glob) + globMatched, err := extendedGlob(glob) if err != nil { return errorResponse("copier: get: glob %q: %v", glob, err) } @@ -1518,7 +1583,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM dirUID, dirGID = req.PutOptions.ChownDirs.UID, req.PutOptions.ChownDirs.GID defaultDirUID, defaultDirGID = dirUID, dirGID } - defaultDirMode := os.FileMode(0755) + defaultDirMode := os.FileMode(0o755) if req.PutOptions.ChmodDirs != nil { defaultDirMode = *req.PutOptions.ChmodDirs } @@ -1559,7 +1624,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM for _, component := range strings.Split(rel, string(os.PathSeparator)) { subdir = filepath.Join(subdir, component) path := filepath.Join(req.Root, subdir) - if err := os.Mkdir(path, 0700); err == nil { + if err := os.Mkdir(path, 0o700); err == nil { if err = lchown(path, defaultDirUID, defaultDirGID); err != nil { return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err) } @@ -1593,7 +1658,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM return nil } createFile := func(path string, tr *tar.Reader) (int64, error) { - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0o600) if err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() { @@ -1611,13 +1676,13 @@ func copierHandlerPut(bulkReader io.Reader, req 
request, idMappings *idtools.IDM return 0, fmt.Errorf("copier: put: error removing item to be overwritten %q: %w", path, err) } } - f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0o600) } if err != nil && os.IsPermission(err) { if err = makeDirectoryWriteable(filepath.Dir(path)); err != nil { return 0, err } - f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0o600) } if err != nil { return 0, fmt.Errorf("copier: put: error opening file %q for writing: %w", path, err) @@ -1781,14 +1846,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM ignoredItems[nameBeforeRenaming] = struct{}{} goto nextHeader } - if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { + if err = mknod(path, chrMode(0o600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break } } if err = os.RemoveAll(path); err == nil { - err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))) + err = mknod(path, chrMode(0o600), int(mkdev(devMajor, devMinor))) } } case tar.TypeBlock: @@ -1796,26 +1861,26 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM ignoredItems[nameBeforeRenaming] = struct{}{} goto nextHeader } - if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { + if err = mknod(path, blkMode(0o600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break } } if err = os.RemoveAll(path); err == nil { - err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))) + err = mknod(path, blkMode(0o600), int(mkdev(devMajor, devMinor))) } } case tar.TypeDir: // FreeBSD can return EISDIR for "mkdir /": // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739. 
- if err = os.Mkdir(path, 0700); err != nil && (errors.Is(err, os.ErrExist) || errors.Is(err, syscall.EISDIR)) { + if err = os.Mkdir(path, 0o700); err != nil && (errors.Is(err, os.ErrExist) || errors.Is(err, syscall.EISDIR)) { if st, stErr := os.Lstat(path); stErr == nil && !st.IsDir() { if req.PutOptions.NoOverwriteNonDirDir { break } if err = os.Remove(path); err == nil { - err = os.Mkdir(path, 0700) + err = os.Mkdir(path, 0o700) } } else { err = stErr @@ -1836,14 +1901,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // the archive more than once for whatever reason directoryModes[path] = mode case tar.TypeFifo: - if err = mkfifo(path, 0600); err != nil && errors.Is(err, os.ErrExist) { + if err = mkfifo(path, 0o600); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break } } if err = os.RemoveAll(path); err == nil { - err = mkfifo(path, 0600) + err = mkfifo(path, 0o600) } } case tar.TypeXGlobalHeader: @@ -1930,7 +1995,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, if req.MkdirOptions.ChownNew != nil { dirUID, dirGID = req.MkdirOptions.ChownNew.UID, req.MkdirOptions.ChownNew.GID } - dirMode := os.FileMode(0755) + dirMode := os.FileMode(0o755) if req.MkdirOptions.ChmodNew != nil { dirMode = *req.MkdirOptions.ChmodNew } @@ -1957,7 +2022,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, for _, component := range strings.Split(rel, string(os.PathSeparator)) { subdir = filepath.Join(subdir, component) path := filepath.Join(req.Root, subdir) - if err := os.Mkdir(path, 0700); err == nil { + if err := os.Mkdir(path, 0o700); err == nil { if err = chown(path, dirUID, dirGID); err != nil { return errorResponse("copier: mkdir: error setting owner of %q to %d:%d: %v", path, dirUID, dirGID, err) } diff --git a/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go b/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go index 062f489f2..cee34aca7 100644 --- a/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && mips) || (linux && mipsle) || (linux && mips64) || (linux && mips64le) -// +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le package copier diff --git a/vendor/github.com/containers/buildah/copier/hardlink_uint64.go b/vendor/github.com/containers/buildah/copier/hardlink_uint64.go index 37365ef05..e88e1899d 100644 --- a/vendor/github.com/containers/buildah/copier/hardlink_uint64.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_uint64.go @@ -1,5 +1,4 @@ //go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd || netbsd -// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd netbsd package copier diff --git a/vendor/github.com/containers/buildah/copier/hardlink_unix.go b/vendor/github.com/containers/buildah/copier/hardlink_unix.go index 67eebc0e2..f4d45fe91 100644 --- a/vendor/github.com/containers/buildah/copier/hardlink_unix.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package copier @@ -25,6 +24,7 @@ func (h *hardlinkChecker) Check(fi os.FileInfo) string { } return "" } + func (h *hardlinkChecker) Add(fi os.FileInfo, name string) { if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink 
> 1 { h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name) diff --git a/vendor/github.com/containers/buildah/copier/hardlink_windows.go b/vendor/github.com/containers/buildah/copier/hardlink_windows.go index ec71ccea4..c2c444587 100644 --- a/vendor/github.com/containers/buildah/copier/hardlink_windows.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_windows.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin -// +build !linux,!darwin package copier @@ -7,11 +6,11 @@ import ( "os" ) -type hardlinkChecker struct { -} +type hardlinkChecker struct{} func (h *hardlinkChecker) Check(fi os.FileInfo) string { return "" } + func (h *hardlinkChecker) Add(fi os.FileInfo, name string) { } diff --git a/vendor/github.com/containers/buildah/copier/mknod_int.go b/vendor/github.com/containers/buildah/copier/mknod_int.go index b9e9f6fef..e23e71713 100644 --- a/vendor/github.com/containers/buildah/copier/mknod_int.go +++ b/vendor/github.com/containers/buildah/copier/mknod_int.go @@ -1,5 +1,4 @@ //go:build !windows && !freebsd -// +build !windows,!freebsd package copier diff --git a/vendor/github.com/containers/buildah/copier/mknod_uint64.go b/vendor/github.com/containers/buildah/copier/mknod_uint64.go index ccddf36fb..10bc4c2eb 100644 --- a/vendor/github.com/containers/buildah/copier/mknod_uint64.go +++ b/vendor/github.com/containers/buildah/copier/mknod_uint64.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package copier diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go index 99b2ee7b6..30356caa2 100644 --- a/vendor/github.com/containers/buildah/copier/syscall_unix.go +++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package copier @@ -58,7 +57,7 @@ func lchown(path string, uid, gid int) error { return os.Lchown(path, uid, gid) } -func lutimes(isSymlink bool, path string, atime, mtime time.Time) error { +func lutimes(_ bool, path string, atime, mtime time.Time) error { if atime.IsZero() || mtime.IsZero() { now := time.Now() if atime.IsZero() { diff --git a/vendor/github.com/containers/buildah/copier/syscall_windows.go b/vendor/github.com/containers/buildah/copier/syscall_windows.go index 3a88d2d3e..d1bfadd12 100644 --- a/vendor/github.com/containers/buildah/copier/syscall_windows.go +++ b/vendor/github.com/containers/buildah/copier/syscall_windows.go @@ -1,4 +1,4 @@ -// +build windows +//go:build windows package copier @@ -83,6 +83,6 @@ func sameDevice(a, b os.FileInfo) bool { } const ( - testModeMask = int64(0600) + testModeMask = int64(0o600) testIgnoreSymlinkDates = true ) diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go index c0d93ac15..f90bb1f50 100644 --- a/vendor/github.com/containers/buildah/copier/xattrs.go +++ b/vendor/github.com/containers/buildah/copier/xattrs.go @@ -1,5 +1,4 @@ //go:build linux || netbsd || freebsd || darwin -// +build linux netbsd freebsd darwin package copier diff --git a/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go index 750d842f8..59444ff61 100644 --- a/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go +++ b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!netbsd,!freebsd,!darwin +//go:build !linux && !netbsd && !freebsd && !darwin package copier diff --git 
a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index b50f26b61..68f3455b3 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -19,12 +19,11 @@ type AdditionalBuildContext struct { IsURL bool // Value is the name of an image which may or may not have already been pulled. IsImage bool - // Value holds a URL, an image name, or an absolute filesystem path. + // Value holds a URL (if IsURL), an image name (if IsImage), or an absolute filesystem path. Value string - // Absolute filesystem path to downloaded and exported build context - // from external tar archive. This will be populated only if following - // buildcontext is created from IsURL and was downloaded before in any - // of the RUN step. + // Absolute filesystem path to a downloaded and exported build context + // from an external tar archive. This will be populated only if the + // build context was a URL and its contents have been downloaded. DownloadedCache string } @@ -374,4 +373,10 @@ type BuildOptions struct { // base images or by a VOLUME instruction to be preserved during RUN // instructions. Newer BuildKit-based docker build doesn't bother. CompatVolumes types.OptionalBool + // CompatScratchConfig causes the image, if it does not have a base + // image, to begin with a truly empty default configuration instead of + // a minimal default configuration. Newer BuildKit-based docker build + // provides a minimal initial configuration with a working directory + // set in it. + CompatScratchConfig types.OptionalBool } diff --git a/vendor/github.com/containers/buildah/define/mount_freebsd.go b/vendor/github.com/containers/buildah/define/mount_freebsd.go index ae5ccc5f5..c0ac6991b 100644 --- a/vendor/github.com/containers/buildah/define/mount_freebsd.go +++ b/vendor/github.com/containers/buildah/define/mount_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package define @@ -11,7 +10,5 @@ const ( TempDir = "/var/tmp" ) -var ( - // Mount potions for bind - BindOptions = []string{} -) +// Mount potions for bind +var BindOptions = []string{} diff --git a/vendor/github.com/containers/buildah/define/mount_linux.go b/vendor/github.com/containers/buildah/define/mount_linux.go index 9d59cb6c3..335309101 100644 --- a/vendor/github.com/containers/buildah/define/mount_linux.go +++ b/vendor/github.com/containers/buildah/define/mount_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package define @@ -11,7 +10,5 @@ const ( TempDir = "/dev/shm" ) -var ( - // Mount potions for bind - BindOptions = []string{"bind"} -) +// Mount potions for bind +var BindOptions = []string{"bind"} diff --git a/vendor/github.com/containers/buildah/define/mount_unsupported.go b/vendor/github.com/containers/buildah/define/mount_unsupported.go index 3c93e2ff7..3feab28c3 100644 --- a/vendor/github.com/containers/buildah/define/mount_unsupported.go +++ b/vendor/github.com/containers/buildah/define/mount_unsupported.go @@ -1,5 +1,4 @@ //go:build darwin || windows || netbsd -// +build darwin windows netbsd package define @@ -11,7 +10,5 @@ const ( TempDir = "/var/tmp" ) -var ( - // Mount potions for bind - BindOptions = []string{""} -) +// Mount potions for bind +var BindOptions = []string{""} diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 5267d0872..044705f38 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ 
b/vendor/github.com/containers/buildah/define/types.go @@ -29,7 +29,7 @@ const ( // identify working containers. Package = "buildah" // Version for the Package. Also used by .packit.sh for Packit builds. - Version = "1.37.5" + Version = "1.38.0" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" @@ -254,9 +254,16 @@ func parseGitBuildContext(url string) (string, string, string) { return gitBranchPart[0], gitSubdir, gitBranch } +func isGitTag(remote, ref string) bool { + if _, err := exec.Command("git", "ls-remote", "--exit-code", remote, ref).Output(); err != nil { + return true + } + return false +} + func cloneToDirectory(url, dir string) ([]byte, string, error) { var cmd *exec.Cmd - gitRepo, gitSubdir, gitBranch := parseGitBuildContext(url) + gitRepo, gitSubdir, gitRef := parseGitBuildContext(url) // init repo cmd = exec.Command("git", "init", dir) combinedOutput, err := cmd.CombinedOutput() @@ -270,27 +277,23 @@ func cloneToDirectory(url, dir string) ([]byte, string, error) { if err != nil { return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git remote add`: %w", err) } - // fetch required branch or commit and perform checkout - // Always default to `HEAD` if nothing specified - fetch := "HEAD" - if gitBranch != "" { - fetch = gitBranch + + if gitRef != "" { + if ok := isGitTag(url, gitRef); ok { + gitRef += ":refs/tags/" + gitRef + } } - logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, fetch, dir) - cmd = exec.Command("git", "fetch", "--depth=1", "origin", "--", fetch) + + logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, gitRef, dir) + args := []string{"fetch", "-u", "--depth=1", "origin", "--", gitRef} + cmd = exec.Command("git", args...) cmd.Dir = dir combinedOutput, err = cmd.CombinedOutput() if err != nil { return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git fetch`: %w", err) } - if fetch == "HEAD" { - // We fetched default branch therefore - // we don't have any valid `branch` or - // `commit` name hence checkout detached - // `FETCH_HEAD` - fetch = "FETCH_HEAD" - } - cmd = exec.Command("git", "checkout", fetch) + + cmd = exec.Command("git", "checkout", "FETCH_HEAD") cmd.Dir = dir combinedOutput, err = cmd.CombinedOutput() if err != nil { @@ -324,7 +327,7 @@ func downloadToDirectory(url, dir string) error { } dockerfile := filepath.Join(dir, "Dockerfile") // Assume this is a Dockerfile - if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil { + if err := ioutils.AtomicWriteFile(dockerfile, body, 0o600); err != nil { return fmt.Errorf("failed to write %q to %q: %w", url, dockerfile, err) } } @@ -342,7 +345,7 @@ func stdinToDirectory(dir string) error { if err := chrootarchive.Untar(reader, dir, nil); err != nil { dockerfile := filepath.Join(dir, "Dockerfile") // Assume this is a Dockerfile - if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil { + if err := ioutils.AtomicWriteFile(dockerfile, b, 0o600); err != nil { return fmt.Errorf("failed to write bytes to %q: %w", dockerfile, err) } } diff --git a/vendor/github.com/containers/buildah/define/types_unix.go b/vendor/github.com/containers/buildah/define/types_unix.go index c57e29d97..20b1a7b2f 100644 --- a/vendor/github.com/containers/buildah/define/types_unix.go +++ b/vendor/github.com/containers/buildah/define/types_unix.go @@ -1,4 +1,4 @@ -// +build darwin linux +//go:build darwin || linux package define diff --git a/vendor/github.com/containers/buildah/define/types_unsupported.go 
b/vendor/github.com/containers/buildah/define/types_unsupported.go index 64e26d377..7db272041 100644 --- a/vendor/github.com/containers/buildah/define/types_unsupported.go +++ b/vendor/github.com/containers/buildah/define/types_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!darwin +//go:build !linux && !darwin package define diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 2b4eeb4d7..e5665d36c 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -26,6 +26,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" @@ -33,6 +34,7 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) const ( @@ -80,7 +82,9 @@ type containerImageRef struct { parent string blobDirectory string preEmptyLayers []v1.History + preLayers []commitLinkedLayerInfo postEmptyLayers []v1.History + postLayers []commitLinkedLayerInfo overrideChanges []string overrideConfig *manifest.Schema2Config extraImageContent map[string]string @@ -92,6 +96,13 @@ type blobLayerInfo struct { Size int64 } +type commitLinkedLayerInfo struct { + layerID string // more like layer "ID" + linkedLayer LinkedLayer + uncompressedDigest digest.Digest + size int64 +} + type containerImageSource struct { path string ref *containerImageRef @@ -278,7 +289,6 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) errChan <- err pipeWriter.Close() - }() return ioutils.NewReadCloserWrapper(pipeReader, func() error { if err = pipeReader.Close(); err != nil { @@ -398,7 +408,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, return oimage, omanifest, dimage, dmanifest, nil } -func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) { +func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) { // Decide which type of manifest and configuration output we're going to provide. manifestType := i.preferredManifestType // If it's not a format we support, return an error. @@ -406,8 +416,17 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType) } - // Start building the list of layers using the read-write layer. + // These maps will let us check if a layer ID is part of one group or another. + parentLayerIDs := make(map[string]bool) + apiLayerIDs := make(map[string]bool) + // Start building the list of layers with any prepended layers. layers := []string{} + for _, preLayer := range i.preLayers { + layers = append(layers, preLayer.layerID) + apiLayerIDs[preLayer.layerID] = true + } + // Now look at the read-write layer, and prepare to work our way back + // through all of its parent layers. 
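The loop resuming below appends the container's read-write layer after any prepended API layers and pushes each parent onto the front of the list. The resulting order is easiest to see in a reduced sketch; assembleLayers, its parent map, and the layer names are all hypothetical:

```go
package main

import "fmt"

// assembleLayers mirrors the list-building walk: parents of the read-write
// layer go to the front (base first), API-supplied "prepended" layers land
// between the base image's layers and the container's own layer, and
// "appended" layers follow everything else.
func assembleLayers(parents map[string]string, rwLayer string, pre, post []string) []string {
	layers := append([]string{}, pre...)
	layerID := rwLayer
	for layerID != "" {
		if layerID == rwLayer {
			layers = append(layers, layerID) // container layer goes after pre layers
		} else {
			layers = append([]string{layerID}, layers...) // parents go in front
		}
		layerID = parents[layerID]
	}
	return append(layers, post...)
}

func main() {
	parents := map[string]string{"rw": "base2", "base2": "base1"}
	fmt.Println(assembleLayers(parents, "rw",
		[]string{"(prepended layer 1)"}, []string{"(appended layer 1)"}))
	// [base1 base2 (prepended layer 1) rw (appended layer 1)]
}
```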
layerID := i.layerID layer, err := i.store.Layer(layerID) if err != nil { @@ -417,7 +436,15 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // or making a confidential workload, we're only producing one layer, so stop at // the layer ID of the top layer, which we won't really be using anyway. for layer != nil { - layers = append(append([]string{}, layerID), layers...) + if layerID == i.layerID { + // append the layer for this container to the list, + // whether it's first or after some prepended layers + layers = append(layers, layerID) + } else { + // prepend this parent layer to the list + layers = append(append([]string{}, layerID), layers...) + parentLayerIDs[layerID] = true + } layerID = layer.Parent if layerID == "" || i.confidentialWorkload.Convert || i.squash { err = nil @@ -430,14 +457,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } layer = nil - // If we're slipping in a synthesized layer, we need to add a placeholder for it - // to the list. + // If we're slipping in a synthesized layer to hold some files, we need + // to add a placeholder for it to the list just after the read-write + // layer. Confidential workloads and squashed images will just inline + // the files, so we don't need to create a layer in those cases. const synthesizedLayerID = "(synthesized layer)" if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash { layers = append(layers, synthesizedLayerID) } + // Now add any API-supplied layers we have to append. + for _, postLayer := range i.postLayers { + layers = append(layers, postLayer.layerID) + apiLayerIDs[postLayer.layerID] = true + } logrus.Debugf("layer list: %q", layers) + // It's simpler from here on to keep track of these as a group. + apiLayers := append(slices.Clone(i.preLayers), slices.Clone(i.postLayers)...) + // Make a temporary directory to hold blobs. path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package) if err != nil { @@ -469,21 +506,26 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System if i.confidentialWorkload.Convert || i.squash { what = fmt.Sprintf("container %q", i.containerID) } + if layerID == synthesizedLayerID { + what = synthesizedLayerID + } + if apiLayerIDs[layerID] { + what = layerID + } // The default layer media type assumes no compression. omediaType := v1.MediaTypeImageLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer // Look up this layer. 
var layerUncompressedDigest digest.Digest var layerUncompressedSize int64 - if layerID != synthesizedLayerID { - layer, err := i.store.Layer(layerID) - if err != nil { - return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) - } - layerID = layer.ID - layerUncompressedDigest = layer.UncompressedDigest - layerUncompressedSize = layer.UncompressedSize - } else { + linkedLayerHasLayerID := func(l commitLinkedLayerInfo) bool { return l.layerID == layerID } + if apiLayerIDs[layerID] { + // API-provided prepended or appended layer + apiLayerIndex := slices.IndexFunc(apiLayers, linkedLayerHasLayerID) + layerUncompressedDigest = apiLayers[apiLayerIndex].uncompressedDigest + layerUncompressedSize = apiLayers[apiLayerIndex].size + } else if layerID == synthesizedLayerID { + // layer diff consisting of extra files to synthesize into a layer diffFilename, digest, size, err := i.makeExtraImageContentDiff(true) if err != nil { return nil, fmt.Errorf("unable to generate layer for additional content: %w", err) @@ -492,10 +534,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System extraImageContentDiffDigest = digest layerUncompressedDigest = digest layerUncompressedSize = size + } else { + // "normal" layer + layer, err := i.store.Layer(layerID) + if err != nil { + return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) + } + layerID = layer.ID + layerUncompressedDigest = layer.UncompressedDigest + layerUncompressedSize = layer.UncompressedSize } - // If we already know the digest of the contents of parent - // layers, reuse their blobsums, diff IDs, and sizes. - if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" { + // We already know the digest of the contents of parent layers, + // so if this is a parent layer, and we know its digest, reuse + // its blobsum, diff ID, and size. + if !i.confidentialWorkload.Convert && !i.squash && parentLayerIDs[layerID] && layerUncompressedDigest != "" { layerBlobSum := layerUncompressedDigest layerBlobSize := layerUncompressedSize diffID := layerUncompressedDigest @@ -546,7 +598,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System return nil, err } } else { - if layerID != synthesizedLayerID { + if apiLayerIDs[layerID] { + // We're reading an API-supplied blob. + apiLayerIndex := slices.IndexFunc(apiLayers, linkedLayerHasLayerID) + f, err := os.Open(apiLayers[apiLayerIndex].linkedLayer.BlobPath) + if err != nil { + return nil, fmt.Errorf("opening layer blob for %s: %w", layerID, err) + } + rc = f + } else if layerID == synthesizedLayerID { + // Slip in additional content as an additional layer. + if rc, err = os.Open(extraImageContentDiff); err != nil { + return nil, err + } + } else { // If we're up to the final layer, but we don't want to // include a diff for it, we're done. if i.emptyLayer && layerID == i.layerID { @@ -557,16 +622,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System if err != nil { return nil, fmt.Errorf("extracting %s: %w", what, err) } - } else { - // Slip in additional content as an additional layer. - if rc, err = os.Open(extraImageContentDiff); err != nil { - return nil, err - } } } srcHasher := digest.Canonical.Digester() // Set up to write the possibly-recompressed blob. 
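srcHasher and the layer file opened next are fed from one stream, so a single pass yields both the blob on disk and its digest and size. A sketch of that hash-while-writing pattern using github.com/opencontainers/go-digest, which this file already imports (writeAndDigest is a hypothetical helper):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

// writeAndDigest copies src into dst while a digester taps the stream via
// io.MultiWriter, producing the file, its digest, and its size in one pass.
func writeAndDigest(dst *os.File, src io.Reader) (digest.Digest, int64, error) {
	digester := digest.Canonical.Digester()
	n, err := io.Copy(io.MultiWriter(dst, digester.Hash()), src)
	if err != nil {
		return "", 0, err
	}
	return digester.Digest(), n, nil
}

func main() {
	f, err := os.CreateTemp("", "layer-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	dgst, size, err := writeAndDigest(f, strings.NewReader("layer contents"))
	fmt.Println(dgst, size, err)
}
```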
- layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) + layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0o600) if err != nil { rc.Close() return nil, fmt.Errorf("opening file for %s: %w", what, err) @@ -678,7 +738,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } // Build history notes in the image configurations. - appendHistory := func(history []v1.History) { + appendHistory := func(history []v1.History, empty bool) { for i := range history { var created *time.Time if history[i].Created != nil { @@ -690,7 +750,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System CreatedBy: history[i].CreatedBy, Author: history[i].Author, Comment: history[i].Comment, - EmptyLayer: true, + EmptyLayer: empty, } oimage.History = append(oimage.History, onews) if created == nil { @@ -701,7 +761,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System CreatedBy: history[i].CreatedBy, Author: history[i].Author, Comment: history[i].Comment, - EmptyLayer: true, + EmptyLayer: empty, } dimage.History = append(dimage.History, dnews) } @@ -712,36 +772,38 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Keep track of how many entries the base image's history had // before we started adding to it. baseImageHistoryLen := len(oimage.History) - appendHistory(i.preEmptyLayers) + + // Add history entries for prepended empty layers. + appendHistory(i.preEmptyLayers, true) + // Add history entries for prepended API-supplied layers. + for _, h := range i.preLayers { + appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) + } + // Add a history entry for this layer, empty or not. created := time.Now().UTC() if i.created != nil { created = (*i.created).UTC() } - comment := i.historyComment - // Add a comment indicating which base image was used, if it wasn't - // just an image ID. - if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID { - comment += "FROM " + i.fromImageName - } onews := v1.History{ Created: &created, CreatedBy: i.createdBy, Author: oimage.Author, EmptyLayer: i.emptyLayer, + Comment: i.historyComment, } oimage.History = append(oimage.History, onews) - oimage.History[baseImageHistoryLen].Comment = comment dnews := docker.V2S2History{ Created: created, CreatedBy: i.createdBy, Author: dimage.Author, EmptyLayer: i.emptyLayer, + Comment: i.historyComment, } dimage.History = append(dimage.History, dnews) - dimage.History[baseImageHistoryLen].Comment = comment - appendHistory(i.postEmptyLayers) - // Add a history entry for the extra image content if we added a layer for it. + // This diff was added to the list of layers before API-supplied layers that + // needed to be appended, and we need to keep the order of history entries for + // not-empty layers consistent with that. if extraImageContentDiff != "" { createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded()) onews := v1.History{ @@ -755,6 +817,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } dimage.History = append(dimage.History, dnews) } + // Add history entries for appended empty layers. + appendHistory(i.postEmptyLayers, true) + // Add history entries for appended API-supplied layers. 
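appendHistory now takes the EmptyLayer value as an argument because API-supplied layers can carry content. The invariant behind this, verified by the confidence check further down, is that history entries with EmptyLayer unset must correspond one-to-one with rootfs diff IDs. A sketch of counting one side of that comparison (nonEmptyHistory and the sample entries are hypothetical):

```go
package main

import (
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// nonEmptyHistory counts the history entries that are expected to have a
// matching diff ID in the image's rootfs section.
func nonEmptyHistory(history []v1.History) int {
	count := 0
	for _, h := range history {
		if !h.EmptyLayer {
			count++
		}
	}
	return count
}

func main() {
	history := []v1.History{
		{CreatedBy: "FROM scratch", EmptyLayer: true},
		{CreatedBy: "COPY app /app", EmptyLayer: false},
		{CreatedBy: "ENV FOO=bar", EmptyLayer: true},
	}
	diffIDs := 1 // one non-empty layer in this hypothetical image
	fmt.Println("consistent:", nonEmptyHistory(history) == diffIDs)
}
```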
+ for _, h := range i.postLayers { + appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) + } + + // Assemble a comment indicating which base image was used, if it wasn't + // just an image ID, and add it to the first history entry we added. + var fromComment string + if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != "" && !strings.HasPrefix(i.fromImageID, i.fromImageName) { + if oimage.History[baseImageHistoryLen].Comment != "" { + fromComment = " " + } + fromComment += "FROM " + i.fromImageName + } + oimage.History[baseImageHistoryLen].Comment += fromComment + dimage.History[baseImageHistoryLen].Comment += fromComment // Confidence check that we didn't just create a mismatch between non-empty layers in the // history and the number of diffIDs. Only applicable if the base image (if there was @@ -841,7 +921,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System return src, nil } -func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) { +func (i *containerImageRef) NewImageDestination(_ context.Context, _ *types.SystemContext) (types.ImageDestination, error) { return nil, errors.New("can't write to a container") } @@ -885,15 +965,15 @@ func (i *containerImageSource) Reference() types.ImageReference { return i.ref } -func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { +func (i *containerImageSource) GetSignatures(_ context.Context, _ *digest.Digest) ([][]byte, error) { return nil, nil } -func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { +func (i *containerImageSource) GetManifest(_ context.Context, _ *digest.Digest) ([]byte, string, error) { return i.manifest, i.manifestType, nil } -func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { +func (i *containerImageSource) LayerInfosForCopy(_ context.Context, _ *digest.Digest) ([]types.BlobInfo, error) { return nil, nil } @@ -901,7 +981,7 @@ func (i *containerImageSource) HasThreadSafeGetBlob() bool { return false } -func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) { +func (i *containerImageSource) GetBlob(_ context.Context, blob types.BlobInfo, _ types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) { if blob.Digest == i.configDigest { logrus.Debugf("start reading config") reader := bytes.NewReader(i.config) @@ -923,7 +1003,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, } else { for _, blobDir := range []string{i.blobDirectory, i.path} { var layerFile *os.File - layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0600) + layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0o600) if err == nil { st, err := layerFile.Stat() if err != nil { @@ -1018,11 +1098,92 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ str return diff.Name(), digester.Digest(), counter.Count, nil } +// makeLinkedLayerInfos calculates the size and digest information for a layer +// we intend to add to the image that we're committing. 
+func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string) ([]commitLinkedLayerInfo, error) { + if layers == nil { + return nil, nil + } + infos := make([]commitLinkedLayerInfo, 0, len(layers)) + for i, layer := range layers { + // complain if EmptyLayer and "is the BlobPath empty" don't agree + if layer.History.EmptyLayer != (layer.BlobPath == "") { + return nil, fmt.Errorf("internal error: layer-is-empty = %v, but content path is %q", layer.History.EmptyLayer, layer.BlobPath) + } + // if there's no layer contents, we're done with this one + if layer.History.EmptyLayer { + continue + } + // check if it's a directory or a non-directory + st, err := os.Stat(layer.BlobPath) + if err != nil { + return nil, fmt.Errorf("checking if layer content %s is a directory: %w", layer.BlobPath, err) + } + info := commitLinkedLayerInfo{ + layerID: fmt.Sprintf("(%s %d)", layerType, i+1), + linkedLayer: layer, + } + if err = func() error { + if st.IsDir() { + // if it's a directory, archive it and digest the archive while we're storing a copy somewhere + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return fmt.Errorf("determining directory for working container: %w", err) + } + f, err := os.CreateTemp(cdir, "") + if err != nil { + return fmt.Errorf("creating temporary file to hold blob for %q: %w", info.linkedLayer.BlobPath, err) + } + defer f.Close() + rc, err := chrootarchive.Tar(info.linkedLayer.BlobPath, nil, info.linkedLayer.BlobPath) + if err != nil { + return fmt.Errorf("generating a layer blob from %q: %w", info.linkedLayer.BlobPath, err) + } + digester := digest.Canonical.Digester() + sizeCounter := ioutils.NewWriteCounter(digester.Hash()) + _, copyErr := io.Copy(f, io.TeeReader(rc, sizeCounter)) + if err := rc.Close(); err != nil { + return fmt.Errorf("storing a copy of %q: %w", info.linkedLayer.BlobPath, err) + } + if copyErr != nil { + return fmt.Errorf("storing a copy of %q: %w", info.linkedLayer.BlobPath, copyErr) + } + info.uncompressedDigest = digester.Digest() + info.size = sizeCounter.Count + info.linkedLayer.BlobPath = f.Name() + } else { + // if it's not a directory, just digest it + f, err := os.Open(info.linkedLayer.BlobPath) + if err != nil { + return err + } + defer f.Close() + sizeCounter := ioutils.NewWriteCounter(io.Discard) + uncompressedDigest, err := digest.Canonical.FromReader(io.TeeReader(f, sizeCounter)) + if err != nil { + return err + } + info.uncompressedDigest = uncompressedDigest + info.size = sizeCounter.Count + } + return nil + }(); err != nil { + return nil, err + } + infos = append(infos, info) + } + return infos, nil +} + // makeContainerImageRef creates a containers/image/v5/types.ImageReference // which is mainly used for representing the working container as a source -// image that can be copied, which is how we commit container to create the +// image that can be copied, which is how we commit the container to create the // image. 
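makeLinkedLayerInfos above runs each layer's digesting inside an immediately-invoked func() error closure, so the deferred Close fires when that layer is finished rather than piling up until the whole function returns. The same idiom in miniature (processAll is hypothetical):

```go
package main

import (
	"fmt"
	"os"
)

// processAll opens each path inside an immediately-invoked closure so the
// deferred Close runs at the end of that iteration, not at function exit.
func processAll(paths []string) error {
	for _, p := range paths {
		if err := func() error {
			f, err := os.Open(p)
			if err != nil {
				return err
			}
			defer f.Close() // closes when this iteration's closure returns
			// ... digest or copy f here ...
			return nil
		}(); err != nil {
			return fmt.Errorf("processing %s: %w", p, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(processAll([]string{os.DevNull}))
}
```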
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) { + if (len(options.PrependedLinkedLayers) > 0 || len(options.AppendedLinkedLayers) > 0) && + (options.ConfidentialWorkloadOptions.Convert || options.Squash) { + return nil, errors.New("can't add prebuilt layers and produce an image with only one layer, at the same time") + } var name reference.Named container, err := b.store.Container(b.ContainerID) if err != nil { @@ -1080,6 +1241,15 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR } } + preLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(b.PrependedLinkedLayers), slices.Clone(options.PrependedLinkedLayers)...), "prepended layer") + if err != nil { + return nil, err + } + postLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(options.AppendedLinkedLayers), slices.Clone(b.AppendedLinkedLayers)...), "appended layer") + if err != nil { + return nil, err + } + ref := &containerImageRef{ fromImageName: b.FromImage, fromImageID: b.FromImageID, @@ -1104,13 +1274,29 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR idMappingOptions: &b.IDMappingOptions, parent: parent, blobDirectory: options.BlobDirectory, - preEmptyLayers: b.PrependedEmptyLayers, - postEmptyLayers: b.AppendedEmptyLayers, + preEmptyLayers: slices.Clone(b.PrependedEmptyLayers), + preLayers: preLayerInfos, + postEmptyLayers: slices.Clone(b.AppendedEmptyLayers), + postLayers: postLayerInfos, overrideChanges: options.OverrideChanges, overrideConfig: options.OverrideConfig, extraImageContent: maps.Clone(options.ExtraImageContent), compatSetParent: options.CompatSetParent, } + if ref.created != nil { + for i := range ref.preEmptyLayers { + ref.preEmptyLayers[i].Created = ref.created + } + for i := range ref.preLayers { + ref.preLayers[i].linkedLayer.History.Created = ref.created + } + for i := range ref.postEmptyLayers { + ref.postEmptyLayers[i].Created = ref.created + } + for i := range ref.postLayers { + ref.postLayers[i].linkedLayer.History.Created = ref.created + } + } return ref, nil } diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 9dc5d7bf5..a4b373fa0 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -16,7 +16,7 @@ import ( "strings" "sync" - "github.com/containerd/containerd/platforms" + "github.com/containerd/platforms" "github.com/containers/buildah" "github.com/containers/buildah/define" internalUtil "github.com/containers/buildah/internal/util" @@ -229,6 +229,17 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B if platform.OS == "" && platform.Arch != "" { platform.OS = runtime.GOOS } + if platform.OS == "" && platform.Arch == "" { + if targetPlatform, ok := options.Args["TARGETPLATFORM"]; ok { + targetPlatform, err := platforms.Parse(targetPlatform) + if err != nil { + return "", nil, fmt.Errorf("parsing TARGETPLATFORM value %q: %w", targetPlatform, err) + } + platform.OS = targetPlatform.OS + platform.Arch = targetPlatform.Architecture + platform.Variant = targetPlatform.Variant + } + } platformSpec := internalUtil.NormalizePlatform(v1.Platform{ OS: platform.OS, Architecture: platform.Arch, diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 0fcc66ac2..b2526d039 100644 --- 
a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -162,6 +162,7 @@ type Executor struct { cdiConfigDir string compatSetParent types.OptionalBool compatVolumes types.OptionalBool + compatScratchConfig types.OptionalBool } type imageTypeAndHistoryAndDiffIDs struct { @@ -220,7 +221,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o if options.RusageLogFile == "" { rusageLogFile = options.Out } else { - rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { return nil, fmt.Errorf("creating file to store rusage logs: %w", err) } @@ -320,6 +321,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o cdiConfigDir: options.CDIConfigDir, compatSetParent: options.CompatSetParent, compatVolumes: options.CompatVolumes, + compatScratchConfig: options.CompatScratchConfig, } if exec.err == nil { exec.err = os.Stderr @@ -1050,7 +1052,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image } logrus.Debugf("printing final image id %q", imageID) if b.iidfile != "" { - if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil { + if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0o644); err != nil { return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err) } } else { diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 34e836ad2..9ac5cc431 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -373,10 +373,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err return errors.New("COPY --parents is not supported") } if len(cp.Excludes) > 0 { - if cp.Download { - return errors.New("ADD --excludes is not supported") - } - return errors.New("COPY --excludes is not supported") + excludes = append(slices.Clone(excludes), cp.Excludes...) } } s.builder.ContentDigester.Restart() @@ -435,7 +432,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co if err != nil { return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err) } - err = tmpFile.Chmod(0644) // 644 is consistent with buildkit + err = tmpFile.Chmod(0o644) // 644 is consistent with buildkit if err != nil { tmpFile.Close() return fmt.Errorf("unable to chmod tmp file created for COPY instruction at %q: %w", tmpFile.Name(), err) @@ -572,6 +569,13 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co IDMappingOptions: idMappingOptions, StripSetuidBit: stripSetuid, StripSetgidBit: stripSetgid, + // The values for these next two fields are ultimately + // based on command line flags with names that sound + // much more generic. 
+ CertPath: s.executor.systemContext.DockerCertPath, + InsecureSkipTLSVerify: s.executor.systemContext.DockerInsecureSkipTLSVerify, + MaxRetries: s.executor.maxPullPushRetries, + RetryDelay: s.executor.retryPullPushDelay, } if len(copy.Files) > 0 { // If we are copying heredoc files, we need to temporary place @@ -596,10 +600,8 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co return nil } -// Returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with from is provided -// Stage can automatically cleanup this mounts when a stage is removed -// check if RUN contains `--mount` with `from`. If yes pre-mount images or stages from executor for Run. -// stages mounted here will we used be Run(). +// Returns a map of StageName/ImageName:internal.StageMountDetails for the +// items in the passed-in mounts list which include a "from=" value. func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) { stageMountPoints := make(map[string]internal.StageMountDetails) for _, flag := range mountList { @@ -622,9 +624,11 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte if fromErr != nil { return nil, fmt.Errorf("unable to resolve argument %q: %w", val, fromErr) } - // If additional buildContext contains this - // give priority to that and break if additional - // is not an external image. + // If the value corresponds to an additional build context, + // the mount source is either either the rootfs of the image, + // the filesystem path, or a temporary directory populated + // with the contents of the URL, all in preference to any + // stage which might have the value as its name. if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok { if additionalBuildContext.IsImage { mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value) @@ -637,39 +641,38 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte // `from` to refer from stageMountPoints map later. stageMountPoints[from] = internal.StageMountDetails{IsStage: false, DidExecute: true, MountPoint: mountPoint} break - } else { - // Most likely this points to path on filesystem - // or external tar archive, Treat it as a stage - // nothing is different for this. So process and - // point mountPoint to path on host and it will - // be automatically handled correctly by since - // GetBindMount will honor IsStage:false while - // processing stageMountPoints. - mountPoint := additionalBuildContext.Value - if additionalBuildContext.IsURL { - // Check if following buildContext was already - // downloaded before in any other RUN step. If not - // download it and populate DownloadCache field for - // future RUN steps. - if additionalBuildContext.DownloadedCache == "" { - // additional context contains a tar file - // so download and explode tar to buildah - // temp and point context to that. 
- path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) - if err != nil { - return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) - } - // point context dir to the extracted path - mountPoint = filepath.Join(path, subdir) - // populate cache for next RUN step - additionalBuildContext.DownloadedCache = mountPoint - } else { - mountPoint = additionalBuildContext.DownloadedCache + } + // Most likely this points to path on filesystem + // or external tar archive, Treat it as a stage + // nothing is different for this. So process and + // point mountPoint to path on host and it will + // be automatically handled correctly by since + // GetBindMount will honor IsStage:false while + // processing stageMountPoints. + mountPoint := additionalBuildContext.Value + if additionalBuildContext.IsURL { + // Check if following buildContext was already + // downloaded before in any other RUN step. If not + // download it and populate DownloadCache field for + // future RUN steps. + if additionalBuildContext.DownloadedCache == "" { + // additional context contains a tar file + // so download and explode tar to buildah + // temp and point context to that. + path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) + if err != nil { + return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) } + // point context dir to the extracted path + mountPoint = filepath.Join(path, subdir) + // populate cache for next RUN step + additionalBuildContext.DownloadedCache = mountPoint + } else { + mountPoint = additionalBuildContext.DownloadedCache } - stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: true, MountPoint: mountPoint} - break } + stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: true, MountPoint: mountPoint} + break } // If the source's name corresponds to the // result of an earlier stage, wait for that @@ -677,10 +680,13 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil { return nil, err } + // If the source's name is a stage, return a + // pointer to its rootfs. if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index { stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: otherStage.didExecute, MountPoint: otherStage.mountPoint} break } else { + // Treat the source's name as the name of an image. 
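The rewritten comments in this hunk spell out a priority order for from= values: an additional build context beats a stage with the same name, and a stage beats pulling an image. Condensed into a hypothetical resolver (every name and return string here is illustrative, not buildah's API):

```go
package main

import "fmt"

type buildContext struct {
	isImage bool
	value   string
}

// resolveFrom applies the documented priority: additional build context
// first (image rootfs, filesystem path, or downloaded tree), then a known
// stage, then an image pulled by name.
func resolveFrom(name string, contexts map[string]buildContext, stages map[string]string) string {
	if ctx, ok := contexts[name]; ok {
		if ctx.isImage {
			return "rootfs of image " + ctx.value
		}
		return "path or downloaded tree at " + ctx.value
	}
	if mountPoint, ok := stages[name]; ok {
		return "rootfs of stage at " + mountPoint
	}
	return "rootfs of pulled image " + name
}

func main() {
	contexts := map[string]buildContext{"ctx": {value: "/src/extra"}}
	stages := map[string]string{"builder": "/var/lib/stages/builder"}
	for _, name := range []string{"ctx", "builder", "alpine"} {
		fmt.Printf("%s -> %s\n", name, resolveFrom(name, contexts, stages))
	}
}
```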
mountPoint, err := s.getImageRootfs(s.ctx, from) if err != nil { return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from) @@ -708,7 +714,7 @@ func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.Fil f.Close() return nil, err } - err = f.Chmod(0755) + err = f.Chmod(0o755) if err != nil { f.Close() return nil, err @@ -878,20 +884,20 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command)) err := fmt.Sprintf(errStr+"%#v", step) if s.executor.ignoreUnrecognizedInstructions { - logrus.Debugf(err) + logrus.Debug(err) return nil } switch logrus.GetLevel() { case logrus.ErrorLevel: - s.executor.logger.Errorf(errStr) + s.executor.logger.Error(errStr) case logrus.DebugLevel: - logrus.Debugf(err) + logrus.Debug(err) default: s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step) } - return fmt.Errorf(err) + return errors.New(err) } // prepare creates a working container based on the specified image, or if one @@ -978,6 +984,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo MountLabel: s.executor.mountLabel, PreserveBaseImageAnns: preserveBaseImageAnnotations, CDIConfigDir: s.executor.cdiConfigDir, + CompatScratchConfig: s.executor.compatScratchConfig, } builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions) @@ -1198,7 +1205,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if output != "" { commitMessage = fmt.Sprintf("%s %s", commitMessage, output) } - logrus.Debugf(commitMessage) + logrus.Debug(commitMessage) if !s.executor.quiet { s.log(commitMessage) } @@ -1315,12 +1322,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Also check the chmod and the chown flags for validity. for _, flag := range step.Flags { command := strings.ToUpper(step.Command) - // chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from=' - if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") { - return "", nil, false, fmt.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") + // chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from=' or '--exclude=' + if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from" || flag == "--exclude") { + return "", nil, false, fmt.Errorf("COPY only supports the --chmod= --chown= --from= and the --exclude= flags") } - if command == "ADD" && (flag == "--chmod" || flag == "--chown" || flag == "--checksum") { - return "", nil, false, fmt.Errorf("ADD only supports the --chmod=, --chown=, and --checksum= flags") + if command == "ADD" && (flag == "--chmod" || flag == "--chown" || flag == "--checksum" || flag == "--exclude") { + return "", nil, false, fmt.Errorf("ADD only supports the --chmod=, --chown=, and --checksum= --exclude= flags") } if strings.Contains(flag, "--from") && command == "COPY" { arr := strings.Split(flag, "=") @@ -1350,14 +1357,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // since this additional context // is not an image. 
break - } else { - // replace with image set in build context - from = additionalBuildContext.Value - if _, err := s.getImageRootfs(ctx, from); err != nil { - return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) - } - break } + // replace with image set in build context + from = additionalBuildContext.Value + if _, err := s.getImageRootfs(ctx, from); err != nil { + return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) + } + break } // If the source's name corresponds to the @@ -1406,30 +1412,29 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } s.builder.AddPrependedEmptyLayer(×tamp, s.getCreatedBy(node, addedContentSummary), "", "") continue - } else { - // This is the last instruction for this stage, - // so we should commit this container to create - // an image, but only if it's the last stage, - // or if it's used as the basis for a later - // stage. - if lastStage || imageIsUsedLater { - logCommit(s.output, i) - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction) - if err != nil { - return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err) - } - logImageID(imgID) - // Generate build output if needed. - if canGenerateBuildOutput { - if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, false, err - } + } + // This is the last instruction for this stage, + // so we should commit this container to create + // an image, but only if it's the last stage, + // or if it's used as the basis for a later + // stage. + if lastStage || imageIsUsedLater { + logCommit(s.output, i) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction) + if err != nil { + return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err) + } + logImageID(imgID) + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, false, err } - } else { - imgID = "" } - break + } else { + imgID = "" } + break } // We're in a multi-layered build. 
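The Copy hunk earlier in this file replaces the hard --exclude error with append(slices.Clone(excludes), cp.Excludes...). The clone is load-bearing: appending to a slice with spare capacity can silently alias the caller's backing array. A short stdlib demonstration (the standard slices package needs Go 1.21 or later):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	base := make([]string, 2, 4) // spare capacity
	base[0], base[1] = "a", "b"

	alias := append(base, "X")               // shares base's backing array
	clone := append(slices.Clone(base), "Y") // private backing array

	alias[0] = "mutated"
	fmt.Println(base[0])  // "mutated": the un-cloned append aliased base
	fmt.Println(clone[0]) // "a": the cloned append did not
}
```

Cloning per call keeps each COPY instruction's exclude list independent of the shared slice the executor passes in, at the cost of one small allocation.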
@@ -2106,7 +2111,7 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (referen if err != nil { logrus.Debugf("failed pulling cache from source %s: %v", src, err) continue // failed pulling this one try next - //return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err) + // return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err) } logrus.Debugf("successfully pulled cache from repo %s: %s", src, id) return src.DockerReference(), id, nil diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 60a8e01d1..bdfeeaf90 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -109,7 +109,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system CommonBuildOpts: &CommonBuildOptions{}, } - if err := builder.initConfig(ctx, image, systemContext); err != nil { + if err := builder.initConfig(ctx, systemContext, image, nil); err != nil { return nil, fmt.Errorf("preparing image configuration: %w", err) } diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 2764b8cb5..87657897e 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -252,7 +252,7 @@ In Ubuntu 22.10 (Karmic) or Debian 12 (Bookworm) you can use these commands: ``` sudo apt-get -y -qq update - sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo + sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo libbtrfs-dev ``` Then to install Buildah follow the steps in this example: diff --git a/vendor/github.com/containers/buildah/internal/config/executor.go b/vendor/github.com/containers/buildah/internal/config/executor.go index 19b1429b7..932c656b9 100644 --- a/vendor/github.com/containers/buildah/internal/config/executor.go +++ b/vendor/github.com/containers/buildah/internal/config/executor.go @@ -17,26 +17,26 @@ import ( // from a Dockerfile. Try anything more than that and it'll return an error. 
type configOnlyExecutor struct{} -func (g *configOnlyExecutor) Preserve(path string) error { +func (g *configOnlyExecutor) Preserve(_ string) error { return errors.New("ADD/COPY/RUN not supported as changes") } -func (g *configOnlyExecutor) EnsureContainerPath(path string) error { +func (g *configOnlyExecutor) EnsureContainerPath(_ string) error { return nil } -func (g *configOnlyExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { +func (g *configOnlyExecutor) EnsureContainerPathAs(_, _ string, _ *os.FileMode) error { return nil } -func (g *configOnlyExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { +func (g *configOnlyExecutor) Copy(_ []string, copies ...imagebuilder.Copy) error { if len(copies) == 0 { return nil } return errors.New("ADD/COPY not supported as changes") } -func (g *configOnlyExecutor) Run(run imagebuilder.Run, config dockerclient.Config) error { +func (g *configOnlyExecutor) Run(_ imagebuilder.Run, _ dockerclient.Config) error { return errors.New("RUN not supported as changes") } diff --git a/vendor/github.com/containers/buildah/internal/mkcw/archive.go b/vendor/github.com/containers/buildah/internal/mkcw/archive.go index 7517205da..7b7860320 100644 --- a/vendor/github.com/containers/buildah/internal/mkcw/archive.go +++ b/vendor/github.com/containers/buildah/internal/mkcw/archive.go @@ -319,7 +319,7 @@ func Archive(rootfsPath string, ociConfig *v1.Image, options ArchiveOptions) (io imageSize := slop(options.ImageSize, options.Slop) if imageSize == 0 { var sourceSize int64 - if err := filepath.WalkDir(rootfsPath, func(path string, d fs.DirEntry, err error) error { + if err := filepath.WalkDir(rootfsPath, func(_ string, d fs.DirEntry, err error) error { if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) { return err } diff --git a/vendor/github.com/containers/buildah/internal/types.go b/vendor/github.com/containers/buildah/internal/types.go index ee87eca22..1c8ef7243 100644 --- a/vendor/github.com/containers/buildah/internal/types.go +++ b/vendor/github.com/containers/buildah/internal/types.go @@ -1,18 +1,18 @@ package internal const ( - // Temp directory which stores external artifacts which are download for a build. - // Example: tar files from external sources. + // BuildahExternalArtifactsDir is the pattern passed to os.MkdirTemp() + // to generate a temporary directory which will be used to hold + // external items which are downloaded for a build, typically a tarball + // being used as an additional build context. BuildahExternalArtifactsDir = "buildah-external-artifacts" ) -// Types is internal packages are suspected to change with releases avoid using these outside of buildah - // StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor // StageExecutor has ability to mount stages/images in current context and // automatically clean them up. 
type StageMountDetails struct { DidExecute bool // tells if the stage which is being mounted was freshly executed or was part of older cache - IsStage bool // tells if mountpoint returned from stage executor is stage or image - MountPoint string // mountpoint of stage/image + IsStage bool // true if the mountpoint is a temporary directory or a stage's rootfs, false if it's an image + MountPoint string // mountpoint of the stage or image's root directory } diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go index dbcaa2375..6643d97ff 100644 --- a/vendor/github.com/containers/buildah/internal/util/util.go +++ b/vendor/github.com/containers/buildah/internal/util/util.go @@ -72,7 +72,7 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { noLChown = true } - err = os.MkdirAll(opts.Path, 0700) + err = os.MkdirAll(opts.Path, 0o700) if err != nil { return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err) } diff --git a/vendor/github.com/containers/buildah/internal/volumes/volumes.go b/vendor/github.com/containers/buildah/internal/volumes/volumes.go index 610e9fcf1..fa9a8bb35 100644 --- a/vendor/github.com/containers/buildah/internal/volumes/volumes.go +++ b/vendor/github.com/containers/buildah/internal/volumes/volumes.go @@ -2,6 +2,7 @@ package volumes import ( "context" + "errors" "fmt" "os" "path" @@ -9,8 +10,6 @@ import ( "strconv" "strings" - "errors" - "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" @@ -231,7 +230,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st // GetCacheMount parses a single cache mount entry from the --mount flag. // // If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??). 
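The GetCacheMount signature change that follows blanks the parameters the function no longer reads. The blank identifier keeps an exported signature stable for callers while making the non-use explicit; a tiny self-contained illustration of the same convention (Mounter and nopMounter are hypothetical):

```go
package main

import "fmt"

type Mounter interface {
	Mount(source, label string) error
}

type nopMounter struct{}

// The label is unused here, so it is declared as _ rather than left
// looking as though it were read.
func (nopMounter) Mount(source string, _ string) error {
	fmt.Println("would mount", source)
	return nil
}

func main() {
	var m Mounter = nopMounter{}
	_ = m.Mount("/var/cache", "system_u:object_r:container_file_t:s0")
}
```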
-func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) { +func GetCacheMount(args []string, _ storage.Store, _ string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) { var err error var mode uint64 var buildahLockFilesDir string @@ -247,7 +246,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a } // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id id := "" - // buildkit parity: cache directory defaults to 755 + // buildkit parity: cache directory defaults to 0o755 mode = 0o755 // buildkit parity: cache directory defaults to uid 0 if not specified uid := 0 @@ -359,8 +358,9 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a } if fromStage != "" { - // do not create cache on host - // instead use read-only mounted stage as cache + // do not create and use a cache directory on the host, + // instead use the location in the mounted stage or + // temporary directory as the cache mountPoint := "" if additionalMountPoints != nil { if val, ok := additionalMountPoints[fromStage]; ok { @@ -369,8 +369,8 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a } } } - // Cache does not supports using image so if not stage found - // return with error + // Cache does not support using an image so if there's no such + // stage or temporary directory, return an error if mountPoint == "" { return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage) } @@ -381,16 +381,16 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a } newMount.Source = evaluated } else { - // we need to create cache on host if no image is being used + // we need to create the cache directory on the host if no image is being used - // since type is cache and cache can be reused by consecutive builds + // since type is cache and a cache can be reused by consecutive builds // create a common cache directory, which persists on hosts within temp lifecycle // add subdirectory if specified // cache parent directory: creates separate cache parent for each user. 
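As the fromStage branch above spells out, from= must name a build stage (or additional build context) whose mountpoint is already recorded in additionalMountPoints; plain images are rejected. Illustrative token sets for the two paths, with the exact option spelling treated as an assumption mirroring buildkit's syntax:

// cache borrowed from a previously built stage named "deps":
args := []string{"target=/go/pkg/mod", "from=deps", "source=/go/pkg/mod"}
// host-backed cache, which falls through to the CacheParent() path below:
args = []string{"target=/root/.ccache", "id=ccache", "uid=0", "gid=0", "mode=0755"}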
cacheParent := CacheParent() // create cache on host if not present - err = os.MkdirAll(cacheParent, os.FileMode(0755)) + err = os.MkdirAll(cacheParent, os.FileMode(0o755)) if err != nil { return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err) } @@ -410,7 +410,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a UID: uid, GID: gid, } - // buildkit parity: change uid and gid if specified otheriwise keep `0` + // buildkit parity: change uid and gid if specified, otherwise keep `0` err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) if err != nil { return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err) @@ -418,7 +418,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a // create a subdirectory inside `cacheParent` just to store lockfiles buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir) - err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700)) + err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0o700)) if err != nil { return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err) } @@ -531,6 +531,7 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, // getMounts takes user-provided input from the --mount flag and creates OCI // spec mounts. // buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// buildah run --mount type=cache,target=/var/cache ... // buildah run --mount type=tmpfs,target=/dev/shm ... // // If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??). @@ -590,7 +591,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c } finalMounts[mount.Destination] = mount case TypeTmpfs: - mount, err := GetTmpfsMount(tokens) + mount, err := GetTmpfsMount(tokens, workDir) if err != nil { return nil, mountedImages, nil, err } @@ -608,7 +609,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c } // GetTmpfsMount parses a single tmpfs mount entry from the --mount flag -func GetTmpfsMount(args []string) (specs.Mount, error) { +func GetTmpfsMount(args []string, workDir string) (specs.Mount, error) { newMount := specs.Mount{ Type: TypeTmpfs, Source: TypeTmpfs, @@ -646,10 +647,14 @@ func GetTmpfsMount(args []string) (specs.Mount, error) { if !hasArgValue { return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - if err := parse.ValidateVolumeCtrDir(argValue); err != nil { + targetPath := argValue + if !path.IsAbs(targetPath) { + targetPath = filepath.Join(workDir, targetPath) + } + if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { return newMount, err } - newMount.Destination = argValue + newMount.Destination = targetPath setDest = true default: return newMount, fmt.Errorf("%v: %w", argName, errBadMntOption) diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index c07921684..d441aa502 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -243,7 +243,6 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions suffixDigitsModulo := 100 for { - var flags map[string]interface{} // check if we have predefined ProcessLabel and MountLabel // this could be true if this is another stage in a build @@ -335,7 +334,7 @@ func newBuilder(ctx context.Context, store storage.Store, options 
BuilderOptions } } - if err := builder.initConfig(ctx, src, systemContext); err != nil { + if err := builder.initConfig(ctx, systemContext, src, &options); err != nil { return nil, fmt.Errorf("preparing image configuration: %w", err) } diff --git a/vendor/github.com/containers/buildah/pkg/binfmt/binfmt.go b/vendor/github.com/containers/buildah/pkg/binfmt/binfmt.go new file mode 100644 index 000000000..e6f0ac62f --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/binfmt/binfmt.go @@ -0,0 +1,83 @@ +//go:build linux + +package binfmt + +import ( + "bufio" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/unshare" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// MaybeRegister() calls Register() if the current context is a rootless one, +// or if the "container" environment variable suggests that we're in a +// container. +func MaybeRegister(configurationSearchDirectories []string) error { + if unshare.IsRootless() || os.Getenv("container") != "" { // we _also_ own our own mount namespace + return Register(configurationSearchDirectories) + } + return nil +} + +// Register() registers binfmt.d emulators described by configuration files in +// the passed-in slice of directories, or in the union of /etc/binfmt.d, +// /run/binfmt.d, and /usr/lib/binfmt.d if the slice has no items. If any +// emulators are configured, it will attempt to mount a binfmt_misc filesystem +// in the current mount namespace first, ignoring only EPERM and EACCES errors. +func Register(configurationSearchDirectories []string) error { + if len(configurationSearchDirectories) == 0 { + configurationSearchDirectories = []string{"/etc/binfmt.d", "/run/binfmt.d", "/usr/lib/binfmt.d"} + } + mounted := false + for _, searchDir := range configurationSearchDirectories { + globs, err := filepath.Glob(filepath.Join(searchDir, "*.conf")) + if err != nil { + return fmt.Errorf("looking for binfmt.d configuration in %q: %w", searchDir, err) + } + for _, conf := range globs { + f, err := os.Open(conf) + if err != nil { + return fmt.Errorf("reading binfmt.d configuration: %w", err) + } + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if len(line) == 0 || line[0] == ';' || line[0] == '#' { + continue + } + if !mounted { + if err = unix.Mount("none", "/proc/sys/fs/binfmt_misc", "binfmt_misc", 0, ""); err != nil { + if errors.Is(err, syscall.EPERM) || errors.Is(err, syscall.EACCES) { + // well, we tried. 
no need to make a stink about it + return nil + } + return fmt.Errorf("mounting binfmt_misc: %w", err) + } + mounted = true + } + reg, err := os.Create("/proc/sys/fs/binfmt_misc/register") + if err != nil { + return fmt.Errorf("registering(open): %w", err) + } + if _, err = fmt.Fprintf(reg, "%s\n", line); err != nil { + return fmt.Errorf("registering(write): %w", err) + } + logrus.Tracef("registered binfmt %q", line) + if err = reg.Close(); err != nil { + return fmt.Errorf("registering(close): %w", err) + } + } + if err := f.Close(); err != nil { + return fmt.Errorf("reading binfmt.d configuration: %w", err) + } + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/pkg/binfmt/binfmt_unsupported.go b/vendor/github.com/containers/buildah/pkg/binfmt/binfmt_unsupported.go new file mode 100644 index 000000000..6bb1667e5 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/binfmt/binfmt_unsupported.go @@ -0,0 +1,15 @@ +//go:build !linux + +package binfmt + +import "syscall" + +// MaybeRegister() returns no error. +func MaybeRegister(configurationSearchDirectories []string) error { + return nil +} + +// Register() returns an error. +func Register(configurationSearchDirectories []string) error { + return syscall.ENOSYS +} diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go index 4614ecf90..e81e01d43 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go @@ -8,11 +8,9 @@ import ( "strings" ) -var ( - // ErrNoSuchUser indicates that the user provided by the caller does not - // exist in /etc/passws - ErrNoSuchUser = errors.New("user does not exist in /etc/passwd") -) +// ErrNoSuchUser indicates that the user provided by the caller does not +// exist in /etc/passws +var ErrNoSuchUser = errors.New("user does not exist in /etc/passwd") // GetUser will return the uid, gid of the user specified in the userspec // it will use the /etc/passwd and /etc/group files inside of the rootdir diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go index 5655a54db..b97064307 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package chrootuser diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go index 0ccaf8a68..56a65ac01 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package chrootuser @@ -76,9 +75,7 @@ func openChrootedFile(rootdir, filename string) (*exec.Cmd, io.ReadCloser, error return cmd, stdout, nil } -var ( - lookupUser, lookupGroup sync.Mutex -) +var lookupUser, lookupGroup sync.Mutex type lookupPasswdEntry struct { name string diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index 666a0a01a..653d6cf05 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -1,6 +1,7 @@ package overlay import ( + "errors" "fmt" "os" "os/exec" @@ 
-8,8 +9,6 @@ import ( "strings" "syscall" - "errors" - "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" @@ -54,7 +53,7 @@ type Options struct { // TempDir generates an overlay Temp directory in the container content func TempDir(containerDir string, rootUID, rootGID int) (string, error) { contentDir := filepath.Join(containerDir, "overlay") - if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(contentDir, 0o700, rootUID, rootGID); err != nil { return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err) } @@ -69,7 +68,7 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) { // GenerateStructure generates an overlay directory structure for container content func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) { contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name) - if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(contentDir, 0o700, rootUID, rootGID); err != nil { return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err) } @@ -80,14 +79,14 @@ func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string, error) { upperDir := filepath.Join(containerDir, "upper") workDir := filepath.Join(containerDir, "work") - if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(upperDir, 0o700, rootUID, rootGID); err != nil { return "", fmt.Errorf("failed to create the overlay %s directory: %w", upperDir, err) } - if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(workDir, 0o700, rootUID, rootGID); err != nil { return "", fmt.Errorf("failed to create the overlay %s directory: %w", workDir, err) } mergeDir := filepath.Join(containerDir, "merge") - if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(mergeDir, 0o700, rootUID, rootGID); err != nil { return "", fmt.Errorf("failed to create the overlay %s directory: %w", mergeDir, err) } diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go index 46d0c44aa..cb0e28fe1 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go @@ -27,7 +27,7 @@ func MountWithOptions(contentDir, source, dest string, opts *Options) (mount spe if opts.ReadOnly { // Read-only overlay mounts require two lower layer. 
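For orientation, pkg/overlay is public API, and Options.ReadOnly is what drives the two-lowerdir arrangement in the overlay_linux.go hunk being patched here. A short usage sketch (containerDir is a hypothetical per-container scratch directory; error handling elided):

contentDir, err := overlay.TempDir(containerDir, 0, 0)
if err != nil {
	return err
}
mnt, err := overlay.MountWithOptions(contentDir, "/host/lower", "/mnt/in/ctr",
	&overlay.Options{ReadOnly: true})
if err != nil {
	return err
}
spec.Mounts = append(spec.Mounts, mnt)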
lowerTwo := filepath.Join(contentDir, "lower") - if err := os.Mkdir(lowerTwo, 0755); err != nil { + if err := os.Mkdir(lowerTwo, 0o755); err != nil { return mount, err } overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_unsupported.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_unsupported.go index 538db65e0..de7ce7d2b 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay_unsupported.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_unsupported.go @@ -1,5 +1,4 @@ //go:build !freebsd && !linux -// +build !freebsd,!linux package overlay diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index abc0a05d9..695b9c334 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -15,7 +15,7 @@ import ( "strings" "unicode" - "github.com/containerd/containerd/platforms" + "github.com/containerd/platforms" "github.com/containers/buildah/define" mkcwtypes "github.com/containers/buildah/internal/mkcw/types" internalParse "github.com/containers/buildah/internal/parse" @@ -57,6 +57,8 @@ const ( BuildahCacheDir = "buildah-cache" ) +var errInvalidSecretSyntax = errors.New("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV][,type=file|env]") + // RepoNamesToNamedReferences parse the raw string to Named reference func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) { var result []reference.Named @@ -250,7 +252,6 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } - } if commonOpts.SeccompProfilePath == "" { @@ -328,7 +329,7 @@ func validateExtraHost(val string) error { // validateIPAddress validates an Ip address. 
// for dns, ip, and ip6 flags also func validateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) + ip := net.ParseIP(strings.TrimSpace(val)) if ip != nil { return ip.String(), nil } @@ -636,7 +637,9 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) { username, password := parseCreds(creds) if username == "" { fmt.Print("Username: ") - fmt.Scanln(&username) + if _, err := fmt.Scanln(&username); err != nil { + return nil, fmt.Errorf("reading user name: %w", err) + } } if password == "" { fmt.Print("Password: ") @@ -659,15 +662,19 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) { if len(buildOutput) == 1 && buildOutput == "-" { // Feature parity with buildkit, output tar to stdout // Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs - return define.BuildOutputOption{Path: "", + return define.BuildOutputOption{ + Path: "", IsDir: false, - IsStdout: true}, nil + IsStdout: true, + }, nil } if !strings.Contains(buildOutput, ",") { // expect default --output - return define.BuildOutputOption{Path: buildOutput, + return define.BuildOutputOption{ + Path: buildOutput, IsDir: true, - IsStdout: false}, nil + IsStdout: false, + }, nil } isDir := true isStdout := false @@ -712,9 +719,11 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) { if isDir { return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput) } - return define.BuildOutputOption{Path: "", + return define.BuildOutputOption{ + Path: "", IsDir: false, - IsStdout: true}, nil + IsStdout: true, + }, nil } return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil @@ -750,7 +759,7 @@ func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOpti if options.AttestationURL == option { options.AttestationURL = strings.TrimPrefix(option, "attestation-url=") } - case strings.HasPrefix(option, "passphrase="), strings.HasPrefix(option, "passphrase="): + case strings.HasPrefix(option, "passphrase="): options.Convert = true options.DiskEncryptionPassphrase = strings.TrimPrefix(option, "passphrase=") case strings.HasPrefix(option, "workload_id="), strings.HasPrefix(option, "workload-id="): @@ -801,7 +810,7 @@ func SBOMScanOptions(c *cobra.Command) (*define.SBOMScanOptions, error) { } // SBOMScanOptionsFromFlagSet parses scan settings from the cli -func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.SBOMScanOptions, error) { +func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, _ func(name string) *pflag.Flag) (*define.SBOMScanOptions, error) { preset, err := flags.GetString("sbom") if err != nil { return nil, fmt.Errorf("invalid value for --sbom: %w", err) @@ -866,7 +875,7 @@ func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name str } // IDMappingOptions parses the build options related to user namespaces and ID mapping. 
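To ground the GetBuildOutput() restructuring above: the function still returns one of three shapes. A hedged usage sketch against the public pkg/parse API:

out, err := parse.GetBuildOutput("type=local,dest=/tmp/rootfs")
if err != nil {
	return err
}
// out == define.BuildOutputOption{Path: "/tmp/rootfs", IsDir: true, IsStdout: false}
// "-"                     yields {Path: "", IsDir: false, IsStdout: true} (tar on stdout)
// "type=tar,dest=out.tar" yields {Path: "out.tar", IsDir: false, IsStdout: false}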
-func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { +func IDMappingOptions(c *cobra.Command, _ define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) } @@ -1209,7 +1218,7 @@ func Device(device string) (string, string, string, error) { // isValidDeviceMode checks if the mode for device is valid or not. // isValid mode is a composition of r (read), w (write), and m (mknod). func isValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]struct{}{ + legalDeviceMode := map[rune]struct{}{ 'r': {}, 'w': {}, 'm': {}, @@ -1233,7 +1242,6 @@ func GetTempDir() string { // Secrets parses the --secret flag func Secrets(secrets []string) (map[string]define.Secret, error) { - invalidSyntax := fmt.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]") parsed := make(map[string]define.Secret) for _, secret := range secrets { tokens := strings.Split(secret, ",") @@ -1253,10 +1261,12 @@ func Secrets(secrets []string) (map[string]define.Secret, error) { return nil, errors.New("invalid secret type, must be file or env") } typ = kv[1] + default: + return nil, errInvalidSecretSyntax } } if id == "" { - return nil, invalidSyntax + return nil, errInvalidSecretSyntax } if src == "" { src = id @@ -1281,11 +1291,11 @@ func Secrets(secrets []string) (map[string]define.Secret, error) { src = fullPath } newSecret := define.Secret{ + ID: id, Source: src, SourceType: typ, } parsed[id] = newSecret - } return parsed, nil } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go index ebb8ad491..3417cb8c9 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin -// +build linux darwin package parse diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go index e3d3a71b8..634220f5e 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin -// +build !linux,!darwin package parse diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go index 317046fc3..71fa60f1a 100644 --- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go +++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package rusage diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go index 54ed77fad..47676dd77 100644 --- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go +++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package rusage diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go index ec28482b0..11e9477e2 100644 --- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go +++ 
b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -64,7 +64,6 @@ func newAgentServerSocket(socketPath string) (*AgentServer, error) { conn: &conn, shutdown: make(chan bool, 1), }, nil - } // Serve starts the SSH agent on the host and returns the path of the socket where the agent is serving @@ -104,7 +103,7 @@ func (a *AgentServer) Serve(processLabel string) (string, error) { go func() { for { - //listener.Accept blocks + // listener.Accept blocks c, err := listener.Accept() if err != nil { select { diff --git a/vendor/github.com/containers/buildah/pkg/util/resource_unix.go b/vendor/github.com/containers/buildah/pkg/util/resource_unix.go index 4f7c08cfc..cc62f860c 100644 --- a/vendor/github.com/containers/buildah/pkg/util/resource_unix.go +++ b/vendor/github.com/containers/buildah/pkg/util/resource_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin -// +build linux freebsd darwin package util diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go b/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go index a27a48019..163e5762d 100644 --- a/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go @@ -3,8 +3,8 @@ package util import ( "bytes" "errors" - "time" "os" + "time" ) func ReadUptime() (time.Duration, error) { diff --git a/vendor/github.com/containers/buildah/pkg/util/version_unix.go b/vendor/github.com/containers/buildah/pkg/util/version_unix.go index 471e10e4a..c8b7002a3 100644 --- a/vendor/github.com/containers/buildah/pkg/util/version_unix.go +++ b/vendor/github.com/containers/buildah/pkg/util/version_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package util diff --git a/vendor/github.com/containers/buildah/pkg/util/version_windows.go b/vendor/github.com/containers/buildah/pkg/util/version_windows.go index 9acf469f1..35a1e3480 100644 --- a/vendor/github.com/containers/buildah/pkg/util/version_windows.go +++ b/vendor/github.com/containers/buildah/pkg/util/version_windows.go @@ -6,5 +6,4 @@ import ( func ReadKernelVersion() (string, error) { return "", errors.New("readKernelVersion not supported on windows") - } diff --git a/vendor/github.com/containers/buildah/pkg/volumes/volumes.go b/vendor/github.com/containers/buildah/pkg/volumes/volumes.go new file mode 100644 index 000000000..aa469a282 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/volumes/volumes.go @@ -0,0 +1,13 @@ +package volumes + +import ( + "os" + + "github.com/containers/buildah/internal/volumes" +) + +// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it. +func CleanCacheMount() error { + cacheParent := volumes.CacheParent() + return os.RemoveAll(cacheParent) +} diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index 13c2bfbbc..c8b071cde 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -62,7 +62,7 @@ type PullOptions struct { // Pull copies the contents of the image from somewhere else to local storage. Returns the // ID of the local image or an error. 
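The new pkg/volumes/volumes.go above gives embedding tools (podman's system-prune path, for example) a public hook for dropping build caches. Minimal usage, assuming a logrus logger is in scope:

if err := volumes.CleanCacheMount(); err != nil {
	logrus.Warnf("removing --mount=type=cache parent directory: %v", err)
}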
-func Pull(ctx context.Context, imageName string, options PullOptions) (imageID string, err error) { +func Pull(_ context.Context, imageName string, options PullOptions) (imageID string, err error) { libimageOptions := &libimage.PullOptions{} libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath libimageOptions.Writer = options.ReportWriter diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index fa5a1c2c9..b162a81b8 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -222,9 +222,10 @@ type IDMaps struct { // netResult type to hold network info for hosts/resolv.conf type netResult struct { - entries etchosts.HostEntries - dnsServers []string - excludeIPs []net.IP - ipv6 bool - keepHostResolvers bool + entries etchosts.HostEntries + dnsServers []string + excludeIPs []net.IP + ipv6 bool + keepHostResolvers bool + preferredHostContainersInternalIP string } diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go index 0076b71eb..8ea14bdfc 100644 --- a/vendor/github.com/containers/buildah/run_common.go +++ b/vendor/github.com/containers/buildah/run_common.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package buildah @@ -27,6 +26,7 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" + "github.com/containers/buildah/internal/tmpdir" internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/internal/volumes" "github.com/containers/buildah/pkg/overlay" @@ -85,7 +85,8 @@ func (b *Builder) createResolvConf(rdir string, chownOpts *idtools.IDPair) (stri // addResolvConf copies files from host and sets them up to bind mount into container func (b *Builder) addResolvConfEntries(file string, networkNameServer []string, - spec *specs.Spec, keepHostServers, ipv6 bool) error { + spec *specs.Spec, keepHostServers, ipv6 bool, +) error { defaultConfig, err := config.Default() if err != nil { return fmt.Errorf("failed to get config: %w", err) @@ -152,7 +153,7 @@ func (b *Builder) createHostsFile(rdir string, chownOpts *idtools.IDPair) (strin return targetfile, nil } -func (b *Builder) addHostsEntries(file, imageRoot string, entries etchosts.HostEntries, exculde []net.IP) error { +func (b *Builder) addHostsEntries(file, imageRoot string, entries etchosts.HostEntries, exclude []net.IP, preferIP string) error { conf, err := config.Default() if err != nil { return err @@ -163,11 +164,15 @@ func (b *Builder) addHostsEntries(file, imageRoot string, entries etchosts.HostE return err } return etchosts.New(&etchosts.Params{ - BaseFile: base, - ExtraHosts: b.CommonBuildOpts.AddHost, - HostContainersInternalIP: etchosts.GetHostContainersInternalIPExcluding(conf, nil, nil, exculde), - TargetFile: file, - ContainerIPs: entries, + BaseFile: base, + ExtraHosts: b.CommonBuildOpts.AddHost, + HostContainersInternalIP: etchosts.GetHostContainersInternalIP(etchosts.HostContainersInternalOptions{ + Conf: conf, + Exclude: exclude, + PreferIP: preferIP, + }), + TargetFile: file, + ContainerIPs: entries, }) } @@ -180,7 +185,7 @@ func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDP hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname))) cfile := filepath.Join(rdir, filepath.Base(hostnamePath)) - if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil { + if err = 
ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0o644); err != nil { return "", fmt.Errorf("writing /etc/hostname into the container: %w", err) } @@ -258,7 +263,7 @@ func runLookupPath(g *generate.Generator, command []string) []string { // check if it's there, if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil { // and if it's not a directory, and either a symlink or executable, - if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) { + if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0o111 != 0)) { // use that. return append([]string{candidate}, command[1:]...) } @@ -440,7 +445,8 @@ func waitForSync(pipeR *os.File) error { } func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string, - containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) { + containerCreateW io.WriteCloser, containerStartR io.ReadCloser, +) (wstatus unix.WaitStatus, err error) { if options.Logger == nil { options.Logger = logrus.StandardLogger() } @@ -466,7 +472,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [ if err != nil { return 1, fmt.Errorf("encoding configuration %#v as json: %w", spec, err) } - if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0o600); err != nil { return 1, fmt.Errorf("storing runtime configuration: %w", err) } @@ -1138,7 +1144,8 @@ func runUsingRuntimeMain() { } func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, networkString string, - moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile, resolvFile string) (err error) { + moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile, resolvFile string, +) (err error) { // Lock the caller to a single OS-level thread. runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -1253,7 +1260,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run // only add hosts if we manage the hosts file if hostsFile != "" { - err = b.addHostsEntries(hostsFile, rootPath, netResult.entries, netResult.excludeIPs) + err = b.addHostsEntries(hostsFile, rootPath, netResult.entries, netResult.excludeIPs, netResult.preferredHostContainersInternalIP) if err != nil { return err } @@ -1340,8 +1347,8 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st } // Get host UID and GID of the container process. - var uidMap = []specs.LinuxIDMapping{} - var gidMap = []specs.LinuxIDMapping{} + uidMap := []specs.LinuxIDMapping{} + gidMap := []specs.LinuxIDMapping{} if spec.Linux != nil { uidMap = spec.Linux.UIDMappings gidMap = spec.Linux.GIDMappings @@ -1381,7 +1388,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st } // Get the list of explicitly-specified volume mounts. 
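Before the getSecretMount changes just below, it helps to recall how secrets enter a build. parse.Secrets, adjusted earlier in this diff to populate the new define.Secret.ID field, is public API:

secrets, err := parse.Secrets([]string{"id=mytoken,src=./token.txt,type=file"})
if err != nil {
	return err
}
// secrets["mytoken"].Source is now an absolute path; at RUN time getSecretMount
// (below) materializes it, by default at /run/secrets/mytoken with mode 0o400.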
- var mountLabel = "" + mountLabel := "" if spec.Linux != nil { mountLabel = spec.Linux.MountLabel } @@ -1442,7 +1449,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin return nil, err } logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume) - if err = os.MkdirAll(volumePath, 0755); err != nil { + if err = os.MkdirAll(volumePath, 0o755); err != nil { return nil, err } if err = relabel(volumePath, mountLabel, false); err != nil { @@ -1598,7 +1605,7 @@ func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources mountImages = append(mountImages, image) } case "tmpfs": - mountSpec, err = b.getTmpfsMount(tokens, idMaps) + mountSpec, err = b.getTmpfsMount(tokens, idMaps, sources.WorkDir) if err != nil { return nil, nil, err } @@ -1658,9 +1665,9 @@ func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContex return &volumes[0], image, nil } -func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*specs.Mount, error) { +func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps, workDir string) (*specs.Mount, error) { var optionMounts []specs.Mount - mount, err := volumes.GetTmpfsMount(tokens) + mount, err := volumes.GetTmpfsMount(tokens, workDir) if err != nil { return nil, err } @@ -1681,7 +1688,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr var id, target string var required bool var uid, gid uint32 - var mode uint32 = 0400 + var mode uint32 = 0o400 for _, val := range tokens { kv := strings.SplitN(val, "=", 2) switch kv[0] { @@ -1729,7 +1736,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr if id == "" { return nil, "", errInvalidSyntax } - // Default location for secretis is /run/secrets/id + // Default location for secrets is /run/secrets/id if target == "" { target = "/run/secrets/" + id } @@ -1737,7 +1744,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr secr, ok := secrets[id] if !ok { if required { - return nil, "", fmt.Errorf("secret required but no secret with id %s found", id) + return nil, "", fmt.Errorf("secret required but no secret with id %q found", id) } return nil, "", nil } @@ -1748,7 +1755,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr switch secr.SourceType { case "env": data = []byte(os.Getenv(secr.Source)) - tmpFile, err := os.CreateTemp(define.TempDir, "buildah*") + tmpFile, err := os.CreateTemp(tmpdir.GetTempDir(), "buildah*") if err != nil { return nil, "", err } @@ -1768,17 +1775,17 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr if err != nil { return nil, "", err } - ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id) + ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", digest.FromString(id).Encoded()[:16]) default: return nil, "", errors.New("invalid source secret type") } // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod, // chown and relabel it for the container user and we don't want to mess with the original file - if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0o755); err != nil { return nil, "", err } - if err := os.WriteFile(ctrFileOnHost, data, 0644); err != nil { + if err := os.WriteFile(ctrFileOnHost, data, 0o644); err != nil { return nil, "", err } @@ -1812,7 +1819,7 @@ func (b *Builder) getSSHMount(tokens []string, 
count int, sshsources map[string] var id, target string var required bool var uid, gid uint32 - var mode uint32 = 400 + var mode uint32 = 0o600 for _, val := range tokens { kv := strings.SplitN(val, "=", 2) if len(kv) < 2 { @@ -1857,7 +1864,7 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string] if id == "" { id = "default" } - // Default location for secretis is /run/buildkit/ssh_agent.{i} + // Default location for secrets is /run/buildkit/ssh_agent.{i} if target == "" { target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count) } @@ -1924,7 +1931,7 @@ func (b *Builder) cleanupTempVolumes() { for tempVolume, val := range b.TempVolumes { if val { if err := overlay.RemoveTemp(tempVolume); err != nil { - b.Logger.Errorf(err.Error()) + b.Logger.Error(err.Error()) } b.TempVolumes[tempVolume] = false } @@ -1940,7 +1947,7 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint } } - //cleanup any mounted images for this run + // cleanup any mounted images for this run for _, image := range artifacts.MountedImages { if image != "" { // if flow hits here some image was mounted for this run diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go index 341c80c4f..79ea3a44d 100644 --- a/vendor/github.com/containers/buildah/run_freebsd.go +++ b/vendor/github.com/containers/buildah/run_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package buildah @@ -46,16 +45,14 @@ const ( PROC_REAP_RELEASE = 3 ) -var ( - // We dont want to remove destinations with /etc, /dev as - // rootfs already contains these files and unionfs will create - // a `whiteout` i.e `.wh` files on removal of overlapping - // files from these directories. everything other than these - // will be cleaned up - nonCleanablePrefixes = []string{ - "/etc", "/dev", - } -) +// We dont want to remove destinations with /etc, /dev as +// rootfs already contains these files and unionfs will create +// a `whiteout` i.e `.wh` files on removal of overlapping +// files from these directories. everything other than these +// will be cleaned up +var nonCleanablePrefixes = []string{ + "/etc", "/dev", +} func procctl(idtype int, id int, cmd int, arg *byte) error { _, _, e1 := unix.Syscall6( @@ -127,10 +124,16 @@ func (b *Builder) Run(command []string, options RunOptions) error { return err } + workDir := b.WorkDir() if options.WorkingDir != "" { g.SetProcessCwd(options.WorkingDir) + workDir = options.WorkingDir } else if b.WorkDir() != "" { g.SetProcessCwd(b.WorkDir()) + workDir = b.WorkDir() + } + if workDir == "" { + workDir = string(os.PathSeparator) } mountPoint, err := b.Mount(b.MountLabel) if err != nil { @@ -185,7 +188,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { uid, gid := spec.Process.User.UID, spec.Process.User.GID idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} - mode := os.FileMode(0755) + mode := os.FileMode(0o755) coptions := copier.MkdirOptions{ ChownNew: idPair, ChmodNew: &mode, @@ -226,7 +229,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { }) } } - err = b.addHostsEntries(hostsFile, mountPoint, entries, nil) + err = b.addHostsEntries(hostsFile, mountPoint, entries, nil, "") if err != nil { return err } @@ -244,7 +247,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { // Only add entries here if we do not have to do setup network, // if we do we have to do it much later after the network setup. 
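The mode change in the getSSHMount hunk above deserves a callout: the old default was the decimal literal 400, which is 0o620 in octal, so the agent socket was never getting the bits anyone intended; the replacement is a genuine 0o600. A two-line check:

fmt.Printf("%o\n", 400)   // prints 620, the accidental old permission bits
fmt.Printf("%o\n", 0o600) // prints 600, owner read/write as intended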
if !configureNetwork { - err = b.addResolvConfEntries(resolvFile, nil, nil, false, true) + err = b.addResolvConfEntries(resolvFile, nil, spec, false, true) if err != nil { return err } @@ -252,6 +255,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { } runMountInfo := runMountInfo{ + WorkDir: workDir, ContextDir: options.ContextDir, Secrets: options.Secrets, SSHSources: options.SSHSources, @@ -536,7 +540,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions namespaceOptions.AddOrReplace(options.NamespaceOptions...) networkPolicy := options.ConfigureNetwork - //Nothing was specified explicitly so network policy should be inherited from builder + // Nothing was specified explicitly so network policy should be inherited from builder if networkPolicy == NetworkDefault { networkPolicy = b.ConfigureNetwork diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 38a77ecf8..71d92323e 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package buildah @@ -10,6 +9,7 @@ import ( "os" "path/filepath" "strings" + "sync" "syscall" "github.com/containers/buildah/bind" @@ -19,6 +19,7 @@ import ( "github.com/containers/buildah/internal" "github.com/containers/buildah/internal/tmpdir" "github.com/containers/buildah/internal/volumes" + "github.com/containers/buildah/pkg/binfmt" "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/parse" butil "github.com/containers/buildah/pkg/util" @@ -59,6 +60,9 @@ var ( nonCleanablePrefixes = []string{ "/etc", "/dev", "/sys", "/proc", } + // binfmtRegistered makes sure we only try to register binfmt_misc + // interpreters once, the first time we handle a RUN instruction. + binfmtRegistered sync.Once ) func setChildProcess() error { @@ -161,6 +165,21 @@ func separateDevicesFromRuntimeSpec(g *generate.Generator) define.ContainerDevic // Run runs the specified command in the container's root filesystem. 
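The new pkg/binfmt package introduced earlier in this diff is what the run_linux.go hunk below wires into Builder.Run for cross-architecture RUN steps. Standalone usage is just:

// nil falls back to the default search path: /etc/binfmt.d, /run/binfmt.d, /usr/lib/binfmt.d
if err := binfmt.MaybeRegister(nil); err != nil {
	logrus.Warnf("registering binfmt_misc interpreters: %v", err)
}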
func (b *Builder) Run(command []string, options RunOptions) error { + if os.Getenv("container") != "" { + os, arch, variant, err := parse.Platform("") + if err != nil { + return fmt.Errorf("reading the current default platform") + } + platform := b.OCIv1.Platform + if os != platform.OS || arch != platform.Architecture || variant != platform.Variant { + binfmtRegistered.Do(func() { + if err := binfmt.Register(nil); err != nil { + logrus.Warnf("registering binfmt_misc interpreters: %v", err) + } + }) + } + } + p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package) if err != nil { return err @@ -218,6 +237,10 @@ func (b *Builder) Run(command []string, options RunOptions) error { workDir = options.WorkingDir } else if b.WorkDir() != "" { g.SetProcessCwd(b.WorkDir()) + workDir = b.WorkDir() + } + if workDir == "" { + workDir = string(os.PathSeparator) } setupSelinux(g, b.ProcessLabel, b.MountLabel) mountPoint, err := b.Mount(b.MountLabel) @@ -346,7 +369,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} - mode := os.FileMode(0755) + mode := os.FileMode(0o755) coptions := copier.MkdirOptions{ ChownNew: idPair, ChmodNew: &mode, @@ -396,7 +419,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { }) } } - err = b.addHostsEntries(hostsFile, mountPoint, entries, nil) + err = b.addHostsEntries(hostsFile, mountPoint, entries, nil, "") if err != nil { return err } @@ -432,7 +455,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { // Empty file, so no need to recreate if it exists if _, ok := bindFiles["/run/.containerenv"]; !ok { containerenvPath := filepath.Join(path, "/run/.containerenv") - if err = os.MkdirAll(filepath.Dir(containerenvPath), 0755); err != nil { + if err = os.MkdirAll(filepath.Dir(containerenvPath), 0o755); err != nil { return err } @@ -450,7 +473,7 @@ imageid=%q rootless=%d `, define.Version, b.Container, b.ContainerID, b.FromImage, b.FromImageID, rootless) - if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0755); err != nil { + if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0o755); err != nil { return err } if err := relabel(containerenvPath, b.MountLabel, false); err != nil { @@ -655,7 +678,7 @@ func setupSlirp4netnsNetwork(config *config.Config, netns, cid string, options, } func setupPasta(config *config.Config, netns string, options, hostnames []string) (func(), *netResult, error) { - res, err := pasta.Setup2(&pasta.SetupOptions{ + res, err := pasta.Setup(&pasta.SetupOptions{ Config: config, Netns: netns, ExtraOptions: options, @@ -669,12 +692,18 @@ func setupPasta(config *config.Config, netns string, options, hostnames []string entries = etchosts.HostEntries{{IP: res.IPAddresses[0].String(), Names: hostnames}} } + mappedIP := "" + if len(res.MapGuestAddrIPs) > 0 { + mappedIP = res.MapGuestAddrIPs[0] + } + result := &netResult{ - entries: entries, - dnsServers: res.DNSForwardIPs, - excludeIPs: res.IPAddresses, - ipv6: res.IPv6, - keepHostResolvers: true, + entries: entries, + dnsServers: res.DNSForwardIPs, + excludeIPs: res.IPAddresses, + ipv6: res.IPv6, + keepHostResolvers: true, + preferredHostContainersInternalIP: mappedIP, } return nil, result, nil @@ -786,7 +815,7 @@ func runMakeStdioPipe(uid, gid int) ([][]int, error) { return stdioPipe, nil } -func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy 
define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) { +func setupNamespaces(_ *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) { defaultContainerConfig, err := config.Default() if err != nil { return false, "", false, fmt.Errorf("failed to get container config: %w", err) @@ -916,7 +945,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions namespaceOptions.AddOrReplace(options.NamespaceOptions...) networkPolicy := options.ConfigureNetwork - //Nothing was specified explicitly so network policy should be inherited from builder + // Nothing was specified explicitly so network policy should be inherited from builder if networkPolicy == NetworkDefault { networkPolicy = b.ConfigureNetwork @@ -1193,9 +1222,6 @@ func setupCapAdd(g *generate.Generator, caps ...string) error { if err := g.AddProcessCapabilityPermitted(cap); err != nil { return fmt.Errorf("adding %q to the permitted capability set: %w", cap, err) } - if err := g.AddProcessCapabilityAmbient(cap); err != nil { - return fmt.Errorf("adding %q to the ambient capability set: %w", cap, err) - } } return nil } @@ -1211,9 +1237,6 @@ func setupCapDrop(g *generate.Generator, caps ...string) error { if err := g.DropProcessCapabilityPermitted(cap); err != nil { return fmt.Errorf("removing %q from the permitted capability set: %w", cap, err) } - if err := g.DropProcessCapabilityAmbient(cap); err != nil { - return fmt.Errorf("removing %q from the ambient capability set: %w", cap, err) - } } return nil } @@ -1296,10 +1319,10 @@ func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mou if err != nil { return nil, err } - gid5Available = checkIdsGreaterThan5(gids) + gid5Available = checkIDsGreaterThan5(gids) } if gid5Available && len(spec.Linux.GIDMappings) > 0 { - gid5Available = checkIdsGreaterThan5(spec.Linux.GIDMappings) + gid5Available = checkIDsGreaterThan5(spec.Linux.GIDMappings) } if !gid5Available { // If we have no GID mappings, the gid=5 default option would fail, so drop it. 
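A worked example for the renamed helper above (its body appears in the next hunk): it reports whether container GID 5 is covered by some mapping, which decides whether the gid=5 devpts option is usable.

// true: the mapping covers container GID 5 (0 <= 5 < 0+65536)
_ = checkIDsGreaterThan5([]specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}})
// false: the mapping ends at container GID 3, so gid=5 stays unmapped
_ = checkIDsGreaterThan5([]specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 4}})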
@@ -1370,7 +1393,7 @@ func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mou return mounts, nil } -func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool { +func checkIDsGreaterThan5(ids []specs.LinuxIDMapping) bool { for _, r := range ids { if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size { return true diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go index 68a3dac24..6677d2c8d 100644 --- a/vendor/github.com/containers/buildah/run_unix.go +++ b/vendor/github.com/containers/buildah/run_unix.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package buildah @@ -24,6 +23,7 @@ func runUsingRuntimeMain() {} func (b *Builder) Run(command []string, options RunOptions) error { return errors.New("function not supported on non-linux systems") } + func DefaultNamespaceOptions() (NamespaceOptions, error) { options := NamespaceOptions{ {Name: string(specs.CgroupNamespace), Host: false}, diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go index b135be7e5..66346d291 100644 --- a/vendor/github.com/containers/buildah/run_unsupported.go +++ b/vendor/github.com/containers/buildah/run_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin && !freebsd -// +build !linux,!darwin,!freebsd package buildah @@ -19,6 +18,7 @@ func runUsingRuntimeMain() {} func (b *Builder) Run(command []string, options RunOptions) error { return errors.New("function not supported on non-linux systems") } + func DefaultNamespaceOptions() (NamespaceOptions, error) { return NamespaceOptions{}, errors.New("function not supported on non-linux systems") } diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go index 3348a3e1d..b48174742 100644 --- a/vendor/github.com/containers/buildah/seccomp.go +++ b/vendor/github.com/containers/buildah/seccomp.go @@ -1,5 +1,4 @@ //go:build seccomp && linux -// +build seccomp,linux package buildah diff --git a/vendor/github.com/containers/buildah/seccomp_unsupported.go b/vendor/github.com/containers/buildah/seccomp_unsupported.go index cba8390c5..3a6801e80 100644 --- a/vendor/github.com/containers/buildah/seccomp_unsupported.go +++ b/vendor/github.com/containers/buildah/seccomp_unsupported.go @@ -1,4 +1,4 @@ -// +build !seccomp !linux +//go:build !seccomp || !linux package buildah diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go index 8cc2bfc62..bd28118c2 100644 --- a/vendor/github.com/containers/buildah/selinux.go +++ b/vendor/github.com/containers/buildah/selinux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package buildah diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go index 5b1948315..4519935bf 100644 --- a/vendor/github.com/containers/buildah/selinux_unsupported.go +++ b/vendor/github.com/containers/buildah/selinux_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux +//go:build !linux package buildah diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index c4218a642..9676e30e7 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -143,20 +143,19 @@ func ReserveSELinuxLabels(store storage.Store, id string) error { for _, c := range containers { if id == c.ID { continue - } else { - b, err := OpenBuilder(store, 
c.ID) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Ignore not exist errors since containers probably created by other tool - // TODO, we need to read other containers json data to reserve their SELinux labels - continue - } - return err - } - // Prevent different containers from using same MCS label - if err := label.ReserveLabel(b.ProcessLabel); err != nil { - return fmt.Errorf("reserving SELinux label %q: %w", b.ProcessLabel, err) + } + b, err := OpenBuilder(store, c.ID) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Ignore not exist errors since containers probably created by other tool + // TODO, we need to read other containers json data to reserve their SELinux labels + continue } + return err + } + // Prevent different containers from using same MCS label + if err := label.ReserveLabel(b.ProcessLabel); err != nil { + return fmt.Errorf("reserving SELinux label %q: %w", b.ProcessLabel, err) } } } diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index b72353af2..40002d503 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -35,14 +35,12 @@ const ( DefaultTransport = "docker://" ) -var ( - // RegistryDefaultPathPrefix contains a per-registry listing of default prefixes - // to prepend to image names that only contain a single path component. - RegistryDefaultPathPrefix = map[string]string{ - "index.docker.io": "library", - "docker.io": "library", - } -) +// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes +// to prepend to image names that only contain a single path component. +var RegistryDefaultPathPrefix = map[string]string{ + "index.docker.io": "library", + "docker.io": "library", +} // StringInSlice is deprecated, use golang.org/x/exp/slices.Contains func StringInSlice(s string, slice []string) bool { @@ -138,10 +136,10 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora return expanded, nil } -// FindImage locates the locally-stored image which corresponds to a given name. -// Please note that the `firstRegistry` argument has been deprecated and has no +// FindImage locates the locally-stored image which corresponds to a given +// name. Please note that the second argument has been deprecated and has no // effect anymore. -func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) { +func FindImage(store storage.Store, _ string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) { runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext}) if err != nil { return nil, nil, err @@ -190,9 +188,8 @@ func ResolveNameToReferences( } // AddImageNames adds the specified names to the specified image. Please note -// that the `firstRegistry` argument has been deprecated and has no effect -// anymore. -func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error { +// that the second argument has been deprecated and has no effect anymore. 
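With the second parameter of FindImage and AddImageNames above now formally ignored, callers just pass an empty string (buildah/util is public API):

ref, img, err := util.FindImage(store, "", systemContext, "docker.io/library/alpine:latest")
if err != nil {
	return err
}
_ = ref // types.ImageReference pointing into local storage
_ = img // *storage.Image record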
+func AddImageNames(store storage.Store, _ string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error { runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext}) if err != nil { return err diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go index 21c55a3ca..507e4892c 100644 --- a/vendor/github.com/containers/buildah/util/util_unix.go +++ b/vendor/github.com/containers/buildah/util/util_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin || freebsd || netbsd -// +build linux darwin freebsd netbsd package util diff --git a/vendor/github.com/containers/buildah/util/util_unsupported.go b/vendor/github.com/containers/buildah/util/util_unsupported.go index 05a68f60b..36d0664dd 100644 --- a/vendor/github.com/containers/buildah/util/util_unsupported.go +++ b/vendor/github.com/containers/buildah/util/util_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux +//go:build !linux package util diff --git a/vendor/github.com/containers/buildah/util/util_windows.go b/vendor/github.com/containers/buildah/util/util_windows.go index d11e894e0..0a8fa5946 100644 --- a/vendor/github.com/containers/buildah/util/util_windows.go +++ b/vendor/github.com/containers/buildah/util/util_windows.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin -// +build !linux,!darwin package util diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 1f4f925fe..b7fa7534d 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -60,6 +60,13 @@ type CopyOptions struct { CertDirPath string // Force layer compression when copying to a `dir` transport destination. DirForceCompress bool + + // ImageListSelection is one of CopySystemImage, CopyAllImages, or + // CopySpecificImages, to control whether, when the source reference is a list, + // copy.Image() copies only an image which matches the current runtime + // environment, or all images which match the supplied reference, or only + // specific images from the source reference. + ImageListSelection copy.ImageListSelection // Allow contacting registries over HTTP, or HTTPS with failed TLS // verification. Note that this does not affect other TLS connections. InsecureSkipTLSVerify types.OptionalBool @@ -153,8 +160,8 @@ type CopyOptions struct { extendTimeoutSocket string } -// copier is an internal helper to conveniently copy images. -type copier struct { +// Copier is a helper to conveniently copy images. +type Copier struct { extendTimeoutSocket string imageCopyOptions copy.Options retryOptions retry.Options @@ -165,6 +172,13 @@ type copier struct { destinationLookup LookupReferenceFunc } +// newCopier creates a Copier based on a runtime's system context. +// Note that fields in options *may* overwrite the counterparts of +// the specified system context. Please make sure to call `(*Copier).Close()`. +func (r *Runtime) newCopier(options *CopyOptions, reportResolvedReference *types.ImageReference) (*Copier, error) { + return NewCopier(options, r.SystemContext(), reportResolvedReference) +} + // storageAllowedPolicyScopes overrides the policy for local storage // to ensure that we can read images from it. 
var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ @@ -206,12 +220,13 @@ func getDockerAuthConfig(name, passwd, creds, idToken string) (*types.DockerAuth } } -// newCopier creates a copier. Note that fields in options *may* overwrite the -// counterparts of the specified system context. Please make sure to call -// `(*copier).close()`. -func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { - c := copier{extendTimeoutSocket: options.extendTimeoutSocket} - c.systemContext = r.systemContextCopy() +// NewCopier creates a Copier based on a provided system context. +// Note that fields in options *may* overwrite the counterparts of +// the specified system context. Please make sure to call `(*Copier).Close()`. +func NewCopier(options *CopyOptions, sc *types.SystemContext, reportResolvedReference *types.ImageReference) (*Copier, error) { + c := Copier{extendTimeoutSocket: options.extendTimeoutSocket} + sysContextCopy := *sc + c.systemContext = &sysContextCopy if options.SourceLookupReferenceFunc != nil { c.sourceLookup = options.SourceLookupReferenceFunc @@ -300,6 +315,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.imageCopyOptions.ProgressInterval = time.Second } + c.imageCopyOptions.ImageListSelection = options.ImageListSelection c.imageCopyOptions.ForceCompressionFormat = options.ForceCompressionFormat c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType c.imageCopyOptions.SourceCtx = c.systemContext @@ -314,6 +330,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.imageCopyOptions.SignBySigstorePrivateKeyFile = options.SignBySigstorePrivateKeyFile c.imageCopyOptions.SignSigstorePrivateKeyPassphrase = options.SignSigstorePrivateKeyPassphrase c.imageCopyOptions.ReportWriter = options.Writer + c.imageCopyOptions.ReportResolvedReference = reportResolvedReference defaultContainerConfig, err := config.Default() if err != nil { @@ -325,14 +342,14 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { return &c, nil } -// close open resources. -func (c *copier) close() error { +// Close open resources. +func (c *Copier) Close() error { return c.policyContext.Destroy() } -// copy the source to the destination. Returns the bytes of the copied +// Copy the source to the destination. Returns the bytes of the copied // manifest which may be used for digest computation. -func (c *copier) copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) { +func (c *Copier) Copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) { logrus.Debugf("Copying source image %s to destination image %s", source.StringWithinTransport(), destination.StringWithinTransport()) // Avoid running out of time when running inside a systemd unit by diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go index 6264b25ec..7b7b56ad7 100644 --- a/vendor/github.com/containers/common/libimage/disk_usage.go +++ b/vendor/github.com/containers/common/libimage/disk_usage.go @@ -31,12 +31,12 @@ type ImageDiskUsage struct { // storage. Note that a single image may yield multiple usage reports, one for // each repository tag. 
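The copier.go hunks above export the previously internal helper: `copier`/`close`/`copy` become `Copier`/`Close`/`Copy`, `NewCopier` can be built from any system context instead of a `*Runtime`, and `CopyOptions` gains `ImageListSelection`. A minimal sketch of the new public surface; the registry names are placeholders and error handling is deliberately blunt:

```go
package main

import (
	"context"
	"log"

	"github.com/containers/common/libimage"
	imageCopy "github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func main() {
	src, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}
	dst, err := alltransports.ParseImageName("docker://registry.example.com/mirror/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}

	opts := &libimage.CopyOptions{
		// New in this version: copy every instance of a manifest list
		// instead of only the one matching the local platform.
		ImageListSelection: imageCopy.CopyAllImages,
	}

	// NewCopier no longer needs a *Runtime; any system context will do.
	c, err := libimage.NewCopier(opts, &types.SystemContext{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	manifestBytes, err := c.Copy(context.Background(), src, dst)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied, manifest is %d bytes", len(manifestBytes))
}
```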
func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error) { - images, err := r.ListImages(ctx, nil, nil) + images, layers, err := r.getImagesAndLayers() if err != nil { return nil, -1, err } - layerTree, err := r.layerTree(ctx, images) + layerTree, err := r.newLayerTreeFromData(images, layers) if err != nil { return nil, -1, err } diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 2465d370d..aa11d54f5 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -19,19 +19,22 @@ import ( // filterFunc is a prototype for a positive image filter. Returning `true` // indicates that the image matches the criteria. -type filterFunc func(*Image) (bool, error) +type filterFunc func(*Image, *layerTree) (bool, error) + +type compiledFilters map[string][]filterFunc // Apply the specified filters. All filters of each key must apply. -func (i *Image) applyFilters(ctx context.Context, filters map[string][]filterFunc) (bool, error) { +// tree must be provided if compileImageFilters indicated it is necessary. +func (i *Image) applyFilters(ctx context.Context, filters compiledFilters, tree *layerTree) (bool, error) { for key := range filters { for _, filter := range filters[key] { - matches, err := filter(i) + matches, err := filter(i, tree) if err != nil { // Some images may have been corrupted in the // meantime, so do an extra check and make the // error non-fatal (see containers/podman/issues/12582). if errCorrupted := i.isCorrupted(ctx, ""); errCorrupted != nil { - logrus.Errorf(errCorrupted.Error()) + logrus.Error(errCorrupted.Error()) return false, nil } return false, err @@ -47,18 +50,11 @@ func (i *Image) applyFilters(ctx context.Context, filters map[string][]filterFun // filterImages returns a slice of images which are passing all specified // filters. -func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *ListImagesOptions) ([]*Image, error) { - if len(options.Filters) == 0 || len(images) == 0 { - return images, nil - } - - filters, err := r.compileImageFilters(ctx, options) - if err != nil { - return nil, err - } +// tree must be provided if compileImageFilters indicated it is necessary. 
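The DiskUsage hunk above swaps two independent reads (images, then layers) for one consistent images-plus-layers snapshot via `getImagesAndLayers` (added later in this diff, in runtime.go), so concurrent image removal can no longer skew the totals. The caller-facing behavior is unchanged; a usage sketch, assuming an initialized `*libimage.Runtime` and the `Repository`/`Tag`/`Size` fields of `ImageDiskUsage`, which this hunk does not show in full:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

func printDiskUsage(ctx context.Context, rt *libimage.Runtime) error {
	usages, total, err := rt.DiskUsage(ctx)
	if err != nil {
		return err
	}
	for _, u := range usages {
		// One entry per repository tag, as the doc comment above notes.
		fmt.Printf("%s:%s\t%d bytes\n", u.Repository, u.Tag, u.Size)
	}
	fmt.Printf("total: %d bytes\n", total)
	return nil
}
```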
+func (r *Runtime) filterImages(ctx context.Context, images []*Image, filters compiledFilters, tree *layerTree) ([]*Image, error) { result := []*Image{} for i := range images { - match, err := images[i].applyFilters(ctx, filters) + match, err := images[i].applyFilters(ctx, filters, tree) if err != nil { return nil, err } @@ -73,25 +69,19 @@ func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *Li // required format is `key=value` with the following supported keys: // // after, since, before, containers, dangling, id, label, readonly, reference, intermediate -func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (map[string][]filterFunc, error) { +// +// compileImageFilters returns: compiled filters, if LayerTree is needed, error +func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (compiledFilters, bool, error) { logrus.Tracef("Parsing image filters %s", options.Filters) - - var tree *layerTree - getTree := func() (*layerTree, error) { - if tree == nil { - t, err := r.layerTree(ctx, nil) - if err != nil { - return nil, err - } - tree = t - } - return tree, nil + if len(options.Filters) == 0 { + return nil, false, nil } filterInvalidValue := `invalid image filter %q: must be in the format "filter=value or filter!=value"` var wantedReferenceMatches, unwantedReferenceMatches []string - filters := map[string][]filterFunc{} + filters := compiledFilters{} + needsLayerTree := false duplicate := map[string]string{} for _, f := range options.Filters { var key, value string @@ -103,7 +93,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp } else { split = strings.SplitN(f, "=", 2) if len(split) != 2 { - return nil, fmt.Errorf(filterInvalidValue, f) + return nil, false, fmt.Errorf(filterInvalidValue, f) } } @@ -113,7 +103,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp case "after", "since": img, err := r.time(key, value) if err != nil { - return nil, err + return nil, false, err } key = "since" filter = filterAfter(img.Created()) @@ -121,27 +111,23 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp case "before": img, err := r.time(key, value) if err != nil { - return nil, err + return nil, false, err } filter = filterBefore(img.Created()) case "containers": if err := r.containers(duplicate, key, value, options.IsExternalContainerFunc); err != nil { - return nil, err + return nil, false, err } filter = filterContainers(value, options.IsExternalContainerFunc) case "dangling": dangling, err := r.bool(duplicate, key, value) if err != nil { - return nil, err - } - t, err := getTree() - if err != nil { - return nil, err + return nil, false, err } - - filter = filterDangling(ctx, dangling, t) + needsLayerTree = true + filter = filterDangling(ctx, dangling) case "id": filter = filterID(value) @@ -149,35 +135,31 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp case "digest": f, err := filterDigest(value) if err != nil { - return nil, err + return nil, false, err } filter = f case "intermediate": intermediate, err := r.bool(duplicate, key, value) if err != nil { - return nil, err - } - t, err := getTree() - if err != nil { - return nil, err + return nil, false, err } - - filter = filterIntermediate(ctx, intermediate, t) + needsLayerTree = true + filter = filterIntermediate(ctx, intermediate) case "label": filter = filterLabel(ctx, value) case "readonly": readOnly, err := 
r.bool(duplicate, key, value) if err != nil { - return nil, err + return nil, false, err } filter = filterReadOnly(readOnly) case "manifest": manifest, err := r.bool(duplicate, key, value) if err != nil { - return nil, err + return nil, false, err } filter = filterManifest(ctx, manifest) @@ -192,12 +174,12 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp case "until": until, err := r.until(value) if err != nil { - return nil, err + return nil, false, err } filter = filterBefore(until) default: - return nil, fmt.Errorf(filterInvalidValue, key) + return nil, false, fmt.Errorf(filterInvalidValue, key) } if negate { filter = negateFilter(filter) @@ -210,12 +192,12 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filter := filterReferences(r, wantedReferenceMatches, unwantedReferenceMatches) filters["reference"] = append(filters["reference"], filter) - return filters, nil + return filters, needsLayerTree, nil } func negateFilter(f filterFunc) filterFunc { - return func(img *Image) (bool, error) { - b, err := f(img) + return func(img *Image, tree *layerTree) (bool, error) { + b, err := f(img, tree) return !b, err } } @@ -272,7 +254,7 @@ func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, er // filterManifest filters whether or not the image is a manifest list func filterManifest(ctx context.Context, value bool) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { isManifestList, err := img.IsManifestList(ctx) if err != nil { return false, err @@ -284,7 +266,7 @@ func filterManifest(ctx context.Context, value bool) filterFunc { // filterReferences creates a reference filter for matching the specified wantedReferenceMatches value (OR logic) // and for matching the unwantedReferenceMatches values (AND logic) func filterReferences(r *Runtime, wantedReferenceMatches, unwantedReferenceMatches []string) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { // Empty reference filters, return true if len(wantedReferenceMatches) == 0 && len(unwantedReferenceMatches) == 0 { return true, nil @@ -376,7 +358,7 @@ func imageMatchesReferenceFilter(r *Runtime, img *Image, value string) (bool, er // filterLabel creates a label for matching the specified value. func filterLabel(ctx context.Context, value string) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { labels, err := img.Labels(ctx) if err != nil { return false, err @@ -387,28 +369,28 @@ func filterLabel(ctx context.Context, value string) filterFunc { // filterAfter creates an after filter for matching the specified value. func filterAfter(value time.Time) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { return img.Created().After(value), nil } } // filterBefore creates a before filter for matching the specified value. func filterBefore(value time.Time) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { return img.Created().Before(value), nil } } // filterReadOnly creates a readonly filter for matching the specified value. 
func filterReadOnly(value bool) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { return img.IsReadOnly() == value, nil } } // filterContainers creates a container filter for matching the specified value. func filterContainers(value string, fn IsExternalContainerFunc) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { ctrs, err := img.Containers() if err != nil { return false, err @@ -433,8 +415,8 @@ func filterContainers(value string, fn IsExternalContainerFunc) filterFunc { } // filterDangling creates a dangling filter for matching the specified value. -func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc { - return func(img *Image) (bool, error) { +func filterDangling(ctx context.Context, value bool) filterFunc { + return func(img *Image, tree *layerTree) (bool, error) { isDangling, err := img.isDangling(ctx, tree) if err != nil { return false, err @@ -445,7 +427,7 @@ func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc // filterID creates an image-ID filter for matching the specified value. func filterID(value string) filterFunc { - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { return strings.HasPrefix(img.ID(), value), nil } } @@ -455,7 +437,7 @@ func filterDigest(value string) (filterFunc, error) { if !strings.HasPrefix(value, "sha256:") { return nil, fmt.Errorf("invalid value %q for digest filter", value) } - return func(img *Image) (bool, error) { + return func(img *Image, _ *layerTree) (bool, error) { return img.containsDigestPrefix(value), nil }, nil } @@ -463,8 +445,8 @@ func filterDigest(value string) (filterFunc, error) { // filterIntermediate creates an intermediate filter for images. An image is // considered to be an intermediate image if it is dangling (i.e., no tags) and // has no children (i.e., no other image depends on it). 
-func filterIntermediate(ctx context.Context, value bool, tree *layerTree) filterFunc { - return func(img *Image) (bool, error) { +func filterIntermediate(ctx context.Context, value bool) filterFunc { + return func(img *Image, tree *layerTree) (bool, error) { isIntermediate, err := img.isIntermediate(ctx, tree) if err != nil { return false, err diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go index 56f84e37a..845f4088e 100644 --- a/vendor/github.com/containers/common/libimage/history.go +++ b/vendor/github.com/containers/common/libimage/history.go @@ -25,7 +25,7 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { return nil, err } - layerTree, err := i.runtime.layerTree(ctx, nil) + layerTree, err := i.runtime.newFreshLayerTree() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 9cc77cdb2..a971b6c67 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/containerd/containerd/platforms" + "github.com/containerd/platforms" "github.com/containers/common/libimage/platform" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -257,7 +257,7 @@ func (i *Image) TopLayer() string { // Parent returns the parent image or nil if there is none func (i *Image) Parent(ctx context.Context) (*Image, error) { - tree, err := i.runtime.layerTree(ctx, nil) + tree, err := i.runtime.newFreshLayerTree() if err != nil { return nil, err } @@ -291,7 +291,7 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) { // created for this invocation only. 
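The practical effect of the `needsLayerTree` flag threaded through these filter hunks: only the "dangling" and "intermediate" filters force a layer-tree build, and every other query skips that work entirely. From the caller's side, using the two-argument ListImages that appears later in this diff (runtime.go), a short sketch:

```go
package main

import (
	"context"

	"github.com/containers/common/libimage"
)

// listDangling triggers the one-time layer-tree build because of the
// "dangling" filter; a pure reference or label query would not.
func listDangling(ctx context.Context, rt *libimage.Runtime) ([]*libimage.Image, error) {
	return rt.ListImages(ctx, &libimage.ListImagesOptions{
		Filters: []string{"dangling=true"},
	})
}
```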
 func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) {
 	if tree == nil {
-		t, err := i.runtime.layerTree(ctx, nil)
+		t, err := i.runtime.newFreshLayerTree()
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go
index 8b9d1f4c4..9628a319f 100644
--- a/vendor/github.com/containers/common/libimage/image_tree.go
+++ b/vendor/github.com/containers/common/libimage/image_tree.go
@@ -3,7 +3,6 @@ package libimage
 
 import (
-	"context"
 	"fmt"
 	"strings"
 
@@ -38,7 +37,7 @@ func (i *Image) Tree(traverseChildren bool) (string, error) {
 		fmt.Fprintf(sb, "No Image Layers")
 	}
 
-	layerTree, err := i.runtime.layerTree(context.Background(), nil)
+	layerTree, err := i.runtime.newFreshLayerTree()
 	if err != nil {
 		return "", err
 	}
diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go
index 552c48eae..ea366f775 100644
--- a/vendor/github.com/containers/common/libimage/import.go
+++ b/vendor/github.com/containers/common/libimage/import.go
@@ -104,13 +104,13 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption
 		return "", err
 	}
 
-	c, err := r.newCopier(&options.CopyOptions)
+	c, err := r.newCopier(&options.CopyOptions, nil)
 	if err != nil {
 		return "", err
 	}
-	defer c.close()
+	defer c.Close()
 
-	if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+	if _, err := c.Copy(ctx, srcRef, destRef); err != nil {
 		return "", err
 	}
 
diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go
index 9af120e25..aeb1d45bb 100644
--- a/vendor/github.com/containers/common/libimage/layer_tree.go
+++ b/vendor/github.com/containers/common/libimage/layer_tree.go
@@ -89,21 +89,18 @@ func (l *layerNode) repoTags() ([]string, error) {
 	return orderedTags, nil
 }
 
-// layerTree extracts a layerTree from the layers in the local storage and
-// relates them to the specified images.
-func (r *Runtime) layerTree(ctx context.Context, images []*Image) (*layerTree, error) {
-	layers, err := r.store.Layers()
+// newFreshLayerTree extracts a layerTree from consistent layers and images in the local storage.
+func (r *Runtime) newFreshLayerTree() (*layerTree, error) {
+	images, layers, err := r.getImagesAndLayers()
 	if err != nil {
 		return nil, err
 	}
+	return r.newLayerTreeFromData(images, layers)
+}
 
-	if images == nil {
-		images, err = r.ListImages(ctx, nil, nil)
-		if err != nil {
-			return nil, err
-		}
-	}
-
+
+// newLayerTreeFromData extracts a layerTree from the given layers and images.
+// The caller is responsible for (layers, images) being consistent.
+func (r *Runtime) newLayerTreeFromData(images []*Image, layers []storage.Layer) (*layerTree, error) { tree := layerTree{ nodes: make(map[string]*layerNode), ociCache: make(map[string]*ociv1.Image), diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index 1a6004c01..0618934c6 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -7,20 +7,28 @@ import ( "errors" "fmt" "maps" + "os" + "path/filepath" "slices" "time" "github.com/containers/common/libimage/define" "github.com/containers/common/libimage/manifests" + manifesterrors "github.com/containers/common/pkg/manifests" + "github.com/containers/common/pkg/supplemented" imageCopy "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/containers/storage" structcopier "github.com/jinzhu/copier" "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" ) // NOTE: the abstractions and APIs here are a first step to further merge @@ -101,8 +109,157 @@ func (r *Runtime) lookupManifestList(name string) (*Image, manifests.List, error return image, list, nil } +// ConvertToManifestList converts the image into a manifest list if it is not +// already also a list. An error is returned if the conversion fails. +func (i *Image) ConvertToManifestList(ctx context.Context) (*ManifestList, error) { + // If we don't need to do anything, don't do anything. + if list, err := i.ToManifestList(); err == nil || !errors.Is(err, ErrNotAManifestList) { + return list, err + } + + // Determine which type we prefer for the new manifest list or image index. + _, imageManifestType, err := i.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading the image's manifest: %w", err) + } + var preferredListType string + switch imageManifestType { + case manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, + manifest.DockerV2ListMediaType: + preferredListType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: + preferredListType = imgspecv1.MediaTypeImageIndex + default: + preferredListType = "" + } + + // Create a list and add the image's manifest to it. Use OCI format + // for now. If we need to convert it to Docker format, we'll do that + // while copying it. + list := manifests.Create() + if _, err := list.Add(ctx, &i.runtime.systemContext, i.storageReference, false); err != nil { + return nil, fmt.Errorf("generating new image index: %w", err) + } + listBytes, err := list.Serialize(imgspecv1.MediaTypeImageIndex) + if err != nil { + return nil, fmt.Errorf("serializing image index: %w", err) + } + listDigest, err := manifest.Digest(listBytes) + if err != nil { + return nil, fmt.Errorf("digesting image index: %w", err) + } + + // Build an OCI layout containing the image index as the only item. + tmp, err := os.MkdirTemp("", "") + if err != nil { + return nil, fmt.Errorf("serializing initial list: %w", err) + } + defer os.RemoveAll(tmp) + + // Drop our image index in there. 
+ if err := os.Mkdir(filepath.Join(tmp, imgspecv1.ImageBlobsDir), 0o755); err != nil { + return nil, fmt.Errorf("creating directory for blobs: %w", err) + } + if err := os.Mkdir(filepath.Join(tmp, imgspecv1.ImageBlobsDir, listDigest.Algorithm().String()), 0o755); err != nil { + return nil, fmt.Errorf("creating directory for %s blobs: %w", listDigest.Algorithm().String(), err) + } + listFile := filepath.Join(tmp, imgspecv1.ImageBlobsDir, listDigest.Algorithm().String(), listDigest.Encoded()) + if err := os.WriteFile(listFile, listBytes, 0o644); err != nil { + return nil, fmt.Errorf("writing image index for OCI layout: %w", err) + } + + // Build the index for the layout. + index := imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: imgspecv1.MediaTypeImageIndex, + Manifests: []imgspecv1.Descriptor{{ + MediaType: imgspecv1.MediaTypeImageIndex, + Digest: listDigest, + Size: int64(len(listBytes)), + }}, + } + indexBytes, err := json.Marshal(&index) + if err != nil { + return nil, fmt.Errorf("encoding image index for OCI layout: %w", err) + } + + // Write the index for the layout. + indexFile := filepath.Join(tmp, imgspecv1.ImageIndexFile) + if err := os.WriteFile(indexFile, indexBytes, 0o644); err != nil { + return nil, fmt.Errorf("writing top-level index for OCI layout: %w", err) + } + + // Write the "why yes, this is an OCI layout" file. + layoutFile := filepath.Join(tmp, imgspecv1.ImageLayoutFile) + layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{Version: imgspecv1.ImageLayoutVersion}) + if err != nil { + return nil, fmt.Errorf("encoding image layout structure for OCI layout: %w", err) + } + if err := os.WriteFile(layoutFile, layoutBytes, 0o644); err != nil { + return nil, fmt.Errorf("writing oci-layout file: %w", err) + } + + // Build an OCI layout reference to use as a source. + tmpRef, err := layout.NewReference(tmp, "") + if err != nil { + return nil, fmt.Errorf("creating reference to directory: %w", err) + } + bundle := supplemented.Reference(tmpRef, []types.ImageReference{i.storageReference}, imageCopy.CopySystemImage, nil) + + // Build a policy that ensures we don't prevent ourselves from reading + // this reference. + signaturePolicy, err := signature.DefaultPolicy(&i.runtime.systemContext) + if err != nil { + return nil, fmt.Errorf("obtaining default signature policy: %w", err) + } + acceptAnything := signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}, + } + signaturePolicy.Transports[i.storageReference.Transport().Name()] = acceptAnything + signaturePolicy.Transports[tmpRef.Transport().Name()] = acceptAnything + policyContext, err := signature.NewPolicyContext(signaturePolicy) + if err != nil { + return nil, fmt.Errorf("creating new signature policy context: %w", err) + } + defer func() { + if err2 := policyContext.Destroy(); err2 != nil { + logrus.Errorf("Destroying signature policy context: %v", err2) + } + }() + + // Copy from the OCI layout into the same image record, so that it gets + // both its own manifest and the image index. + copyOptions := imageCopy.Options{ + ForceManifestMIMEType: imageManifestType, + } + if _, err := imageCopy.Image(ctx, policyContext, i.storageReference, bundle, ©Options); err != nil { + return nil, fmt.Errorf("writing updates to image: %w", err) + } + + // Now explicitly write the list's manifest to the image as its "main" + // manifest. 
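A standalone sketch of the layout-shaping steps ConvertToManifestList performs above (blobs/<algorithm>/<hex>, index.json, oci-layout), before the hunk resumes below with the SaveToImage call. The index bytes here are a placeholder, not a valid image index:

```go
package main

import (
	"encoding/json"
	"os"
	"path/filepath"

	"github.com/opencontainers/go-digest"
	imgspec "github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// writeMinimalLayout drops one blob into an OCI layout directory and
// writes the two marker files that make it a layout.
func writeMinimalLayout(dir string, indexBytes []byte) error {
	dgst := digest.FromBytes(indexBytes)
	blobDir := filepath.Join(dir, imgspecv1.ImageBlobsDir, dgst.Algorithm().String())
	if err := os.MkdirAll(blobDir, 0o755); err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(blobDir, dgst.Encoded()), indexBytes, 0o644); err != nil {
		return err
	}
	// Top-level index pointing at the single blob.
	top := imgspecv1.Index{
		Versioned: imgspec.Versioned{SchemaVersion: 2},
		MediaType: imgspecv1.MediaTypeImageIndex,
		Manifests: []imgspecv1.Descriptor{{
			MediaType: imgspecv1.MediaTypeImageIndex,
			Digest:    dgst,
			Size:      int64(len(indexBytes)),
		}},
	}
	topBytes, err := json.Marshal(&top)
	if err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(dir, imgspecv1.ImageIndexFile), topBytes, 0o644); err != nil {
		return err
	}
	// Marshaling the spec struct yields {"imageLayoutVersion":"1.0.0"},
	// the same bytes the older code wrote as a string literal.
	layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{Version: imgspecv1.ImageLayoutVersion})
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, imgspecv1.ImageLayoutFile), layoutBytes, 0o644)
}

func main() {
	dir, _ := os.MkdirTemp("", "layout")
	_ = writeMinimalLayout(dir, []byte(`{"schemaVersion":2,"manifests":[]}`))
}
```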
+ if _, err := list.SaveToImage(i.runtime.store, i.ID(), i.storageImage.Names, preferredListType); err != nil { + return nil, fmt.Errorf("saving image index: %w", err) + } + + // Reload the record. + if err = i.reload(); err != nil { + return nil, fmt.Errorf("reloading image record: %w", err) + } + mList, err := i.runtime.LookupManifestList(i.storageImage.ID) + if err != nil { + return nil, fmt.Errorf("looking up new manifest list: %w", err) + } + + return mList, nil +} + // ToManifestList converts the image into a manifest list. An error is thrown -// if the image is no manifest list. +// if the image is not a manifest list. func (i *Image) ToManifestList() (*ManifestList, error) { list, err := i.getManifestList() if err != nil { @@ -139,7 +296,7 @@ func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, var return nil, err } - allImages, err := m.image.runtime.ListImages(ctx, nil, nil) + allImages, err := m.image.runtime.ListImages(ctx, nil) if err != nil { return nil, err } @@ -194,6 +351,9 @@ func (m *ManifestList) reload() error { // getManifestList is a helper to obtain a manifest list func (i *Image) getManifestList() (manifests.List, error) { _, list, err := manifests.LoadFromImage(i.runtime.store, i.ID()) + if errors.Is(err, manifesterrors.ErrManifestTypeNotSupported) { + err = fmt.Errorf("%s: %w", err.Error(), ErrNotAManifestList) + } return list, err } @@ -632,11 +792,11 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma // NOTE: we're using the logic in copier to create a proper // types.SystemContext. This prevents us from having an error prone // code duplicate here. - copier, err := m.image.runtime.newCopier(&options.CopyOptions) + copier, err := m.image.runtime.newCopier(&options.CopyOptions, nil) if err != nil { return "", err } - defer copier.close() + defer copier.Close() pushOptions := manifests.PushOptions{ AddCompression: options.AddCompression, diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index a1625bb1b..50c32569a 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -342,8 +342,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in } } // write the index that refers to this one artifact image - tag := "latest" - indexFile := filepath.Join(tmp, "index.json") + indexFile := filepath.Join(tmp, v1.ImageIndexFile) index := v1.Index{ Versioned: imgspec.Versioned{ SchemaVersion: 2, @@ -353,9 +352,6 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in MediaType: v1.MediaTypeImageManifest, Digest: artifactManifestDigest, Size: int64(len(contents)), - Annotations: map[string]string{ - v1.AnnotationRefName: tag, - }, }}, } indexBytes, err := json.Marshal(&index) @@ -366,12 +362,16 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in return nil, fmt.Errorf("writing image index for OCI layout: %w", err) } // write the layout file - layoutFile := filepath.Join(tmp, "oci-layout") - if err := os.WriteFile(layoutFile, []byte(`{"imageLayoutVersion": "1.0.0"}`), 0o644); err != nil { + layoutFile := filepath.Join(tmp, v1.ImageLayoutFile) + layoutBytes, err := json.Marshal(v1.ImageLayout{Version: v1.ImageLayoutVersion}) + if err != nil { + return nil, fmt.Errorf("encoding image layout for OCI layout: %w", err) + } + if err := 
os.WriteFile(layoutFile, layoutBytes, 0o644); err != nil { return nil, fmt.Errorf("writing oci-layout file: %w", err) } // build the reference to this artifact image's oci layout - ref, err := ocilayout.NewReference(tmp, tag) + ref, err := ocilayout.NewReference(tmp, "") if err != nil { return nil, fmt.Errorf("creating ImageReference for artifact with files %q: %w", symlinkedFiles, err) } @@ -676,14 +676,14 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag // This should provide for all of the ways to construct a manifest outlined in // https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidelines-for-artifact-usage -// * no blobs → set ManifestArtifactType -// * blobs, no configuration → set ManifestArtifactType and possibly LayerMediaType, and provide file names -// * blobs and configuration → set ManifestArtifactType, possibly LayerMediaType, and ConfigDescriptor, and provide file names +// - no blobs → set ManifestArtifactType +// - blobs, no configuration → set ManifestArtifactType and possibly LayerMediaType, and provide file names +// - blobs and configuration → set ManifestArtifactType, possibly LayerMediaType, and ConfigDescriptor, and provide file names // // The older style of describing artifacts: -// * leave ManifestArtifactType blank -// * specify a zero-length application/vnd.oci.image.config.v1+json config blob -// * set LayerMediaType to a custom type +// - leave ManifestArtifactType blank +// - specify a zero-length application/vnd.oci.image.config.v1+json config blob +// - set LayerMediaType to a custom type // // When reading data produced elsewhere, note that newer tooling will produce // manifests with ArtifactType set. If the manifest's ArtifactType is not set, diff --git a/vendor/github.com/containers/common/libimage/platform/platform.go b/vendor/github.com/containers/common/libimage/platform/platform.go index 2272cee75..54c3b8deb 100644 --- a/vendor/github.com/containers/common/libimage/platform/platform.go +++ b/vendor/github.com/containers/common/libimage/platform/platform.go @@ -4,7 +4,7 @@ import ( "fmt" "runtime" - "github.com/containerd/containerd/platforms" + "github.com/containerd/platforms" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 3db1b2992..02b6a4233 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -17,15 +17,14 @@ import ( dockerArchiveTransport "github.com/containers/image/v5/docker/archive" dockerDaemonTransport "github.com/containers/image/v5/docker/daemon" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" ociArchiveTransport "github.com/containers/image/v5/oci/archive" ociTransport "github.com/containers/image/v5/oci/layout" "github.com/containers/image/v5/pkg/shortnames" storageTransport "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/containers/storage" - digest "github.com/opencontainers/go-digest" ociSpec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -231,11 +230,11 @@ func nameFromAnnotations(annotations map[string]string) string { // copyFromDefault is the default copier for a number of transports. 
Other // transports require some specific dancing, sometimes Yoga. func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { - c, err := r.newCopier(options) + c, err := r.newCopier(options, nil) if err != nil { return nil, err } - defer c.close() + defer c.Close() // Figure out a name for the storage destination. var storageName, imageName string @@ -321,7 +320,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, return nil, fmt.Errorf("parsing %q: %w", storageName, err) } - _, err = c.copy(ctx, ref, destRef) + _, err = c.Copy(ctx, ref, destRef) return []string{imageName}, err } @@ -387,11 +386,11 @@ func (r *Runtime) copyFromDockerArchive(ctx context.Context, ref types.ImageRefe // copyFromDockerArchiveReaderReference copies the specified readerRef from reader. func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, reader *dockerArchiveTransport.Reader, readerRef types.ImageReference, options *CopyOptions) ([]string, error) { - c, err := r.newCopier(options) + c, err := r.newCopier(options, nil) if err != nil { return nil, err } - defer c.close() + defer c.Close() // Get a slice of storage references we can copy. references, destNames, err := r.storageReferencesReferencesFromArchiveReader(ctx, readerRef, reader) @@ -401,7 +400,7 @@ func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, read // Now copy all of the images. Use readerRef for performance. for _, destRef := range references { - if _, err := c.copy(ctx, readerRef, destRef); err != nil { + if _, err := c.Copy(ctx, readerRef, destRef); err != nil { return nil, err } } @@ -421,7 +420,11 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference } if !options.AllTags { - return r.copySingleImageFromRegistry(ctx, inputName, pullPolicy, options) + pulled, err := r.copySingleImageFromRegistry(ctx, inputName, pullPolicy, options) + if err != nil { + return nil, err + } + return []string{pulled}, nil } // Copy all tags @@ -447,68 +450,19 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference if err != nil { return nil, err } - pulledIDs = append(pulledIDs, pulled...) + pulledIDs = append(pulledIDs, pulled) } return pulledIDs, nil } -// imageIDsForManifest() parses the manifest of the copied image and then looks -// up the IDs of the matching image. There's a small slice of time, between -// when we copy the image into local storage and when we go to look for it -// using the name that we gave it when we copied it, when the name we wanted to -// assign to the image could have been moved, but the image's ID will remain -// the same until it is deleted. 
-func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemContext) ([]string, error) { - var imageDigest digest.Digest - manifestType := manifest.GuessMIMEType(manifestBytes) - if manifest.MIMETypeIsMultiImage(manifestType) { - list, err := manifest.ListFromBlob(manifestBytes, manifestType) - if err != nil { - return nil, fmt.Errorf("parsing manifest list: %w", err) - } - d, err := list.ChooseInstance(sys) - if err != nil { - return nil, fmt.Errorf("choosing instance from manifest list: %w", err) - } - imageDigest = d - } else { - d, err := manifest.Digest(manifestBytes) - if err != nil { - return nil, errors.New("digesting manifest") - } - imageDigest = d - } - images, err := r.store.ImagesByDigest(imageDigest) - if err != nil { - return nil, fmt.Errorf("listing images by manifest digest: %w", err) - } - - // If you have additionStores defined and the same image stored in - // both storage and additional store, it can be output twice. - // Fixes github.com/containers/podman/issues/18647 - results := []string{} - imageMap := map[string]bool{} - for _, image := range images { - if imageMap[image.ID] { - continue - } - imageMap[image.ID] = true - results = append(results, image.ID) - } - if len(results) == 0 { - return nil, fmt.Errorf("identifying new image by manifest digest: %w", storage.ErrImageUnknown) - } - return results, nil -} - // copySingleImageFromRegistry pulls the specified, possibly unqualified, name // from a registry. On successful pull it returns the ID of the image in local -// storage. -func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) { //nolint:gocyclo +// storage (or, FIXME, a name/ID? that could be resolved in local storage) +func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy config.PullPolicy, options *PullOptions) (string, error) { //nolint:gocyclo // Sanity check. if err := pullPolicy.Validate(); err != nil { - return nil, err + return "", err } var ( @@ -533,6 +487,14 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str if options.OS != runtime.GOOS { lookupImageOptions.OS = options.OS } + // FIXME: We sometimes return resolvedImageName from this function. + // The function documentation says this returns an image ID, resolvedImageName is frequently not an image ID. + // + // Ultimately Runtime.Pull looks up the returned name... again, possibly finding some other match + // than we did. + // + // This should be restructured so that the image we found here is returned to the caller of Pull + // directly, without another image -> name -> image round-trip and possible inconsistency. 
localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions) if err != nil && !errors.Is(err, storage.ErrImageUnknown) { logrus.Errorf("Looking up %s in local storage: %v", imageName, err) @@ -563,23 +525,23 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str if pullPolicy == config.PullPolicyNever { if localImage != nil { logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) - return []string{resolvedImageName}, nil + return resolvedImageName, nil } logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) - return nil, fmt.Errorf("%s: %w", imageName, storage.ErrImageUnknown) + return "", fmt.Errorf("%s: %w", imageName, storage.ErrImageUnknown) } if pullPolicy == config.PullPolicyMissing && localImage != nil { - return []string{resolvedImageName}, nil + return resolvedImageName, nil } // If we looked up the image by ID, we cannot really pull from anywhere. if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) { switch pullPolicy { case config.PullPolicyAlways: - return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName) + return "", fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName) default: - return []string{resolvedImageName}, nil + return resolvedImageName, nil } } @@ -604,9 +566,9 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str resolved, err := shortnames.Resolve(sys, imageName) if err != nil { if localImage != nil && pullPolicy == config.PullPolicyNewer { - return []string{resolvedImageName}, nil + return resolvedImageName, nil } - return nil, err + return "", err } // NOTE: Below we print the description from the short-name resolution. 
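The hunks below replace the digest-based `imagesIDsForManifest` lookup removed above: rather than re-finding the image by manifest digest after the fact, the caller hands `newCopier` a pointer and c/image reports the exact reference it wrote. Condensed into a sketch, assuming the destination is a containers-storage reference (which the pull code guarantees, and which is what makes the reported reference resolvable); `pullExact` is illustrative:

```go
package main

import (
	"context"

	"github.com/containers/common/libimage"
	storageTransport "github.com/containers/image/v5/storage"
	"github.com/containers/image/v5/types"
)

func pullExact(ctx context.Context, sys *types.SystemContext, src, dst types.ImageReference) (string, error) {
	var resolved types.ImageReference
	c, err := libimage.NewCopier(&libimage.CopyOptions{}, sys, &resolved)
	if err != nil {
		return "", err
	}
	defer c.Close()
	if _, err := c.Copy(ctx, src, dst); err != nil {
		return "", err
	}
	// resolved now names the exact image that was written; its ID stays
	// stable even if the tag is moved by a concurrent operation.
	_, img, err := storageTransport.ResolveReference(resolved)
	if err != nil {
		return "", err
	}
	return img.ID, nil
}
```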
@@ -636,11 +598,12 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str if socketPath, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { options.extendTimeoutSocket = socketPath } - c, err := r.newCopier(&options.CopyOptions) + var resolvedReference types.ImageReference + c, err := r.newCopier(&options.CopyOptions, &resolvedReference) if err != nil { - return nil, err + return "", err } - defer c.close() + defer c.Close() var pullErrors []error for _, candidate := range resolved.PullCandidates { @@ -648,7 +611,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str logrus.Debugf("Attempting to pull candidate %s for %s", candidateString, imageName) srcRef, err := registryTransport.NewReference(candidate.Value) if err != nil { - return nil, err + return "", err } if pullPolicy == config.PullPolicyNewer && localImage != nil { @@ -666,19 +629,18 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str destRef, err := storageTransport.Transport.ParseStoreReference(r.store, candidate.Value.String()) if err != nil { - return nil, err + return "", err } if err := writeDesc(); err != nil { - return nil, err + return "", err } if options.Writer != nil { if _, err := io.WriteString(options.Writer, fmt.Sprintf("Trying to pull %s...\n", candidateString)); err != nil { - return nil, err + return "", err } } - var manifestBytes []byte - if manifestBytes, err = c.copy(ctx, srcRef, destRef); err != nil { + if _, err := c.Copy(ctx, srcRef, destRef); err != nil { logrus.Debugf("Error pulling candidate %s: %v", candidateString, err) pullErrors = append(pullErrors, err) continue @@ -691,19 +653,23 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str } logrus.Debugf("Pulled candidate %s successfully", candidateString) - if ids, err := r.imagesIDsForManifest(manifestBytes, sys); err == nil { - return ids, nil + if resolvedReference == nil { // resolvedReference should always be set for storageTransport destinations + return "", fmt.Errorf("internal error: After pulling %s, resolvedReference is nil", candidateString) + } + _, image, err := storageTransport.ResolveReference(resolvedReference) + if err != nil { + return "", fmt.Errorf("resolving an already-resolved reference %q to the pulled image: %w", transports.ImageName(resolvedReference), err) } - return []string{candidate.Value.String()}, nil + return image.ID, nil } if localImage != nil && pullPolicy == config.PullPolicyNewer { - return []string{resolvedImageName}, nil + return resolvedImageName, nil } if len(pullErrors) == 0 { - return nil, fmt.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy) + return "", fmt.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy) } - return nil, resolved.FormatPullErrors(pullErrors) + return "", resolved.FormatPullErrors(pullErrors) } diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go index f89b8fc07..dc9934480 100644 --- a/vendor/github.com/containers/common/libimage/push.go +++ b/vendor/github.com/containers/common/libimage/push.go @@ -109,12 +109,12 @@ func (r *Runtime) Push(ctx context.Context, source, destination string, options } } - c, err := r.newCopier(&options.CopyOptions) + c, err := r.newCopier(&options.CopyOptions, nil) if err != nil { return nil, err } - defer c.close() + defer c.Close() - return c.copy(ctx, srcRef, destRef) + return c.Copy(ctx, srcRef, destRef) } diff --git 
a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 1baf41d5d..f7c9b2bf9 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -161,6 +161,23 @@ func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageRef } } +// getImagesAndLayers obtains consistent slices of Image and storage.Layer +func (r *Runtime) getImagesAndLayers() ([]*Image, []storage.Layer, error) { + snapshot, err := r.store.MultiList( + storage.MultiListOptions{ + Images: true, + Layers: true, + }) + if err != nil { + return nil, nil, err + } + images := []*Image{} + for i := range snapshot.Images { + images = append(images, r.storageToImage(&snapshot.Images[i], nil)) + } + return images, snapshot.Layers, nil +} + // Exists returns true if the specified image exists in the local containers // storage. Note that it may return false if an image corrupted. func (r *Runtime) Exists(name string) (bool, error) { @@ -479,7 +496,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifi return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", originalName, storage.ErrImageUnknown) } - allImages, err := r.ListImages(context.Background(), nil, nil) + allImages, err := r.ListImages(context.Background(), nil) if err != nil { return nil, "", err } @@ -567,39 +584,46 @@ type ListImagesOptions struct { SetListData bool } -// ListImages lists images in the local container storage. If names are -// specified, only images with the specified names are looked up and filtered. -func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListImagesOptions) ([]*Image, error) { - if options == nil { - options = &ListImagesOptions{} - } - - var images []*Image - if len(names) > 0 { - for _, name := range names { - image, _, err := r.LookupImage(name, nil) - if err != nil { - return nil, err - } - images = append(images, image) - } - } else { - storageImages, err := r.store.Images() +// ListImagesByNames lists the images in the local container storage by specified names +// The name lookups use the LookupImage semantics. 
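ListImagesByNames (whose body follows) and the slimmed-down ListImages split the old three-argument API: name lookups and filtered listings are now separate entry points, and the listing path reads images (and, only when a filter needs it, layers) in one consistent MultiList snapshot. A migration sketch for callers, with the old call shapes in comments:

```go
package main

import (
	"context"

	"github.com/containers/common/libimage"
)

func migrated(ctx context.Context, rt *libimage.Runtime) error {
	// Before: rt.ListImages(ctx, []string{"alpine"}, nil)
	byName, err := rt.ListImagesByNames([]string{"alpine"})
	if err != nil {
		return err
	}
	_ = byName

	// Before: rt.ListImages(ctx, nil, options)
	all, err := rt.ListImages(ctx, &libimage.ListImagesOptions{SetListData: true})
	if err != nil {
		return err
	}
	_ = all
	return nil
}
```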
+func (r *Runtime) ListImagesByNames(names []string) ([]*Image, error) { + images := []*Image{} + for _, name := range names { + image, _, err := r.LookupImage(name, nil) if err != nil { return nil, err } - for i := range storageImages { - images = append(images, r.storageToImage(&storageImages[i], nil)) - } + images = append(images, image) + } + return images, nil +} + +// ListImages lists the images in the local container storage and filter the images by ListImagesOptions +func (r *Runtime) ListImages(ctx context.Context, options *ListImagesOptions) ([]*Image, error) { + if options == nil { + options = &ListImagesOptions{} } - filtered, err := r.filterImages(ctx, images, options) + filters, needsLayerTree, err := r.compileImageFilters(ctx, options) if err != nil { return nil, err } - if !options.SetListData { - return filtered, nil + if options.SetListData { + needsLayerTree = true + } + + snapshot, err := r.store.MultiList( + storage.MultiListOptions{ + Images: true, + Layers: needsLayerTree, + }) + if err != nil { + return nil, err + } + images := []*Image{} + for i := range snapshot.Images { + images = append(images, r.storageToImage(&snapshot.Images[i], nil)) } // If explicitly requested by the user, pre-compute and cache the @@ -608,11 +632,23 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI // as the layer tree will computed once for all instead of once for // each individual image (see containers/podman/issues/17828). - tree, err := r.layerTree(ctx, images) + var tree *layerTree + if needsLayerTree { + tree, err = r.newLayerTreeFromData(images, snapshot.Layers) + if err != nil { + return nil, err + } + } + + filtered, err := r.filterImages(ctx, images, filters, tree) if err != nil { return nil, err } + if !options.SetListData { + return filtered, nil + } + for i := range filtered { isDangling, err := filtered[i].isDangling(ctx, tree) if err != nil { @@ -753,7 +789,7 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem IsExternalContainerFunc: options.IsExternalContainerFunc, Filters: options.Filters, } - filteredImages, err := r.ListImages(ctx, nil, options) + filteredImages, err := r.ListImages(ctx, options) if err != nil { appendError(err) return nil, rmErrors diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index 62cad3288..c9cc8a507 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -119,13 +119,13 @@ func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string return err } - c, err := r.newCopier(&options.CopyOptions) + c, err := r.newCopier(&options.CopyOptions, nil) if err != nil { return err } - defer c.close() + defer c.Close() - _, err = c.copy(ctx, srcRef, destRef) + _, err = c.Copy(ctx, srcRef, destRef) return err } @@ -204,11 +204,11 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st copyOpts := options.CopyOptions copyOpts.dockerArchiveAdditionalTags = local.tags - c, err := r.newCopier(©Opts) + c, err := r.newCopier(©Opts, nil) if err != nil { return err } - defer c.close() + defer c.Close() destRef, err := writer.NewReference(nil) if err != nil { @@ -220,7 +220,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st return err } - if _, err := c.copy(ctx, srcRef, destRef); err != nil { + if _, err := c.Copy(ctx, srcRef, destRef); err != nil { return err } } diff --git 
a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index b26ad80d2..6ab016b3b 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -215,7 +215,7 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri } paramsArr := []SearchResult{} - for i := 0; i < limit; i++ { + for i := range limit { // Check whether query matches filters if !(filterMatchesAutomatedFilter(&options.Filter, results[i]) && filterMatchesOfficialFilter(&options.Filter, results[i]) && filterMatchesStarFilter(&options.Filter, results[i])) { continue @@ -275,7 +275,7 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr } } paramsArr := []SearchResult{} - for i := 0; i < limit; i++ { + for i := range limit { params := SearchResult{ Name: imageRef.DockerReference().Name(), Tag: tags[i], diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go index 337a27b8e..68a42945c 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/run.go +++ b/vendor/github.com/containers/common/libnetwork/cni/run.go @@ -62,7 +62,6 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma } for name, netOpts := range options.Networks { - netOpts := netOpts network := n.networks[name] rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) @@ -237,7 +236,6 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption var multiErr *multierror.Error teardown := func() error { for name, netOpts := range options.Networks { - netOpts := netOpts rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go index 7ca62137b..8a7cc534e 100644 --- a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go +++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go @@ -229,9 +229,10 @@ func checkIfEntryExists(current HostEntry, entries HostEntries) bool { return false } -// parseExtraHosts converts a slice of "name:ip" string to entries. -// Because podman and buildah both store the extra hosts in this format -// we convert it here instead of having to this on the caller side. +// parseExtraHosts converts a slice of "name1;name2;name3:ip" string to entries. +// Each entry can contain one or more hostnames separated by semicolons and an IP address separated by a colon. +// Because podman and buildah both store the extra hosts in this format, +// we convert it here instead of having to do this on the caller side. 
 func parseExtraHosts(extraHosts []string, hostContainersInternalIP string) (HostEntries, error) {
 	entries := make(HostEntries, 0, len(extraHosts))
 	for _, entry := range extraHosts {
@@ -252,7 +253,8 @@ func parseExtraHosts(extraHosts []string, hostContainersInternalIP string) (Host
 			}
 			ip = hostContainersInternalIP
 		}
-		e := HostEntry{IP: ip, Names: []string{values[0]}}
+		names := strings.Split(values[0], ";")
+		e := HostEntry{IP: ip, Names: names}
 		entries = append(entries, e)
 	}
 	return entries, nil
diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/ip.go b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go
index 909fcf67d..588fbff35 100644
--- a/vendor/github.com/containers/common/libnetwork/etchosts/ip.go
+++ b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go
@@ -10,17 +10,27 @@ import (
 	"github.com/containers/storage/pkg/unshare"
 )
 
-// GetHostContainersInternalIP returns the host.containers.internal ip
-// if netStatus is not nil then networkInterface also must be non nil otherwise this function panics
-func GetHostContainersInternalIP(conf *config.Config, netStatus map[string]types.StatusBlock, networkInterface types.ContainerNetwork) string {
-	return GetHostContainersInternalIPExcluding(conf, netStatus, networkInterface, nil)
+// HostContainersInternalOptions contains the options for GetHostContainersInternalIP()
+type HostContainersInternalOptions struct {
+	// Conf is the containers.Conf, must not be nil
+	Conf *config.Config
+	// NetStatus is the network status for the container,
+	// if this is set networkInterface must not be nil
+	NetStatus map[string]types.StatusBlock
+	// NetworkInterface of the current runtime
+	NetworkInterface types.ContainerNetwork
+	// Exclude lists IPs that should not be returned; this is
+	// useful to prevent returning the same IP as in the container.
+	Exclude []net.IP
+	// PreferIP is an IP that is used if set, but it has a
+	// lower priority than the containers.conf config option.
+	// This is used for the pasta --map-guest-addr ip.
+	PreferIP string
 }
 
-// GetHostContainersInternalIPExcluding returns the host.containers.internal ip
-// Exclude are ips that should not be returned, this is useful to prevent returning the same ip as in the container.
-// if netStatus is not nil then networkInterface also must be non nil otherwise this function panics
-func GetHostContainersInternalIPExcluding(conf *config.Config, netStatus map[string]types.StatusBlock, networkInterface types.ContainerNetwork, exclude []net.IP) string {
-	switch conf.Containers.HostContainersInternalIP {
+// GetHostContainersInternalIP returns the host.containers.internal ip
+func GetHostContainersInternalIP(opts HostContainersInternalOptions) string {
+	switch opts.Conf.Containers.HostContainersInternalIP {
 	case "":
 		// if empty (default) we will automatically choose one below
 		// if machine using gvproxy we let the gvproxy dns server handle the dns name so do not add it
@@ -30,16 +40,22 @@ func GetHostContainersInternalIPExcluding(conf *config.Config, netStatus map[str
 	case "none":
 		return ""
 	default:
-		return conf.Containers.HostContainersInternalIP
+		return opts.Conf.Containers.HostContainersInternalIP
+	}
+
+	// caller has a specific ip it prefers
+	if opts.PreferIP != "" {
+		return opts.PreferIP
 	}
+
 	ip := ""
 	// Only use the bridge ip when root, as rootless the interfaces are created
 	// inside the special netns and not the host so we cannot use them.
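Two caller-visible changes land in these etchosts hunks: an extra-hosts entry may now carry several names before the colon (so `web;api:10.88.0.3` yields one hosts entry with both names), and the growing parameter list of GetHostContainersInternalIP collapses into an options struct. A sketch of the new call shape; the surrounding variables are placeholders:

```go
package main

import (
	"net"

	"github.com/containers/common/libnetwork/etchosts"
	"github.com/containers/common/libnetwork/types"
	"github.com/containers/common/pkg/config"
)

func hostInternalIP(conf *config.Config, status map[string]types.StatusBlock, nw types.ContainerNetwork, containerIP net.IP) string {
	return etchosts.GetHostContainersInternalIP(etchosts.HostContainersInternalOptions{
		Conf:             conf,
		NetStatus:        status,
		NetworkInterface: nw,
		// Never hand back the container's own address.
		Exclude: []net.IP{containerIP},
		// Lowest priority; an explicit containers.conf setting wins.
		PreferIP: "",
	})
}
```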
if unshare.IsRootless() { - return util.GetLocalIPExcluding(exclude) + return util.GetLocalIPExcluding(opts.Exclude) } - for net, status := range netStatus { - network, err := networkInterface.NetworkInspect(net) + for net, status := range opts.NetStatus { + network, err := opts.NetworkInterface.NetworkInspect(net) // only add the host entry for bridge networks // ip/macvlan gateway is normally not on the host if err != nil || network.Driver != types.BridgeNetworkDriver { @@ -60,7 +76,19 @@ func GetHostContainersInternalIPExcluding(conf *config.Config, netStatus map[str if ip != "" { return ip } - return util.GetLocalIPExcluding(exclude) + return util.GetLocalIPExcluding(opts.Exclude) +} + +// GetHostContainersInternalIPExcluding returns the host.containers.internal ip +// Exclude are ips that should not be returned, this is useful to prevent returning the same ip as in the container. +// if netStatus is not nil then networkInterface also must be non nil otherwise this function panics +func GetHostContainersInternalIPExcluding(conf *config.Config, netStatus map[string]types.StatusBlock, networkInterface types.ContainerNetwork, exclude []net.IP) string { + return GetHostContainersInternalIP(HostContainersInternalOptions{ + Conf: conf, + NetStatus: netStatus, + NetworkInterface: networkInterface, + Exclude: exclude, + }) } // GetNetworkHostEntries returns HostEntries for all ips in the network status diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go index f8b9b76df..7fac465a6 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go @@ -1,6 +1,7 @@ package rootlessnetns import ( + "encoding/json" "errors" "fmt" "io/fs" @@ -34,6 +35,9 @@ const ( // refCountFile file name for the ref count file refCountFile = "ref-count" + // infoCacheFile file name for the cache file used to store the rootless netns info + infoCacheFile = "info.json" + // rootlessNetNsConnPidFile is the name of the rootless netns slirp4netns/pasta pid file rootlessNetNsConnPidFile = "rootless-netns-conn.pid" @@ -54,11 +58,9 @@ type Netns struct { // config contains containers.conf options. config *config.Config - // ipAddresses used in the netns, this is needed to store - // the netns ips that are used by pasta. This is then handed - // back to the caller via IPAddresses() which then can make - // sure to not use them for host.containers.internal. - ipAddresses []net.IP + // info contain information about ip addresses used in the netns. + // A caller can get this info via Info(). + info *types.RootlessNetnsInfo } type rootlessNetnsError struct { @@ -115,6 +117,9 @@ func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) { // quick check if pasta/slirp4netns are still running err := unix.Kill(pid, 0) if err == nil { + if err := n.deserializeInfo(); err != nil { + return nil, false, wrapError("deserialize info", err) + } // All good, return the netns. 
return nsRef, false, nil } @@ -196,7 +201,7 @@ func (n *Netns) setupPasta(nsPath string) error { Netns: nsPath, ExtraOptions: []string{"--pid", pidPath}, } - res, err := pasta.Setup2(&pastaOpts) + res, err := pasta.Setup(&pastaOpts) if err != nil { return fmt.Errorf("setting up Pasta: %w", err) } @@ -227,6 +232,15 @@ func (n *Netns) setupPasta(nsPath string) error { return wrapError("create resolv.conf", err) } + n.info = &types.RootlessNetnsInfo{ + IPAddresses: res.IPAddresses, + DnsForwardIps: res.DNSForwardIPs, + MapGuestIps: res.MapGuestAddrIPs, + } + if err := n.serializeInfo(); err != nil { + return wrapError("serialize info", err) + } + return nil } @@ -261,6 +275,12 @@ func (n *Netns) setupSlirp4netns(nsPath string) error { if err != nil { return wrapError("determine default slirp4netns DNS address", err) } + nameservers := []string{resolveIP.String()} + + netnsIP, err := slirp4netns.GetIP(res.Subnet) + if err != nil { + return wrapError("determine default slirp4netns ip address", err) + } if err := resolvconf.New(&resolvconf.Params{ Path: n.getPath(resolvConfName), @@ -270,10 +290,19 @@ func (n *Netns) setupSlirp4netns(nsPath string) error { }, IPv6Enabled: res.IPv6, KeepHostServers: true, - Nameservers: []string{resolveIP.String()}, + Nameservers: nameservers, }); err != nil { return wrapError("create resolv.conf", err) } + + n.info = &types.RootlessNetnsInfo{ + IPAddresses: []net.IP{*netnsIP}, + DnsForwardIps: nameservers, + } + if err := n.serializeInfo(); err != nil { + return wrapError("serialize info", err) + } + return nil } @@ -541,20 +570,6 @@ func (n *Netns) runInner(toRun func() error, cleanup bool) (err error) { if err := toRun(); err != nil { return err } - - // get the current active addresses in the netns, and store them - addrs, err := net.InterfaceAddrs() - if err != nil { - return err - } - ips := make([]net.IP, 0, len(addrs)) - for _, addr := range addrs { - // make sure to skip localhost and other special addresses - if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() { - ips = append(ips, ipnet.IP) - } - } - n.ipAddresses = ips return nil }) } @@ -630,9 +645,7 @@ func (n *Netns) Run(lock *lockfile.LockFile, toRun func() error) error { // IPAddresses returns the currently used ip addresses in the netns // These should then not be assigned for the host.containers.internal entry. 
func (n *Netns) Info() *types.RootlessNetnsInfo { - return &types.RootlessNetnsInfo{ - IPAddresses: n.ipAddresses, - } + return n.info } func refCount(dir string, inc int) (int, error) { @@ -671,3 +684,26 @@ func readPidFile(path string) (int, error) { } return strconv.Atoi(strings.TrimSpace(string(b))) } + +func (n *Netns) serializeInfo() error { + f, err := os.Create(filepath.Join(n.dir, infoCacheFile)) + if err != nil { + return err + } + return json.NewEncoder(f).Encode(n.info) +} + +func (n *Netns) deserializeInfo() error { + f, err := os.Open(filepath.Join(n.dir, infoCacheFile)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + defer f.Close() + if n.info == nil { + n.info = new(types.RootlessNetnsInfo) + } + return json.NewDecoder(f).Decode(n.info) +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go index 96c21e76f..260e74d06 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/util.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go @@ -112,7 +112,7 @@ func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet, subnetPools []config.Su // GetFreeIPv6NetworkSubnet returns a unused ipv6 subnet func GetFreeIPv6NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) { // FIXME: Is 10000 fine as limit? We should prevent an endless loop. - for i := 0; i < 10000; i++ { + for range 10000 { // RFC4193: Choose the ipv6 subnet random and NOT sequentially. network, err := getRandomIPv6Subnet() if err != nil { diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go index 55995440e..ddc778a5f 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go @@ -129,7 +129,6 @@ func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOp return errors.New("must specify at least one network") } for name, netOpts := range options.Networks { - netOpts := netOpts network, err := n.Network(name) if err != nil { return err diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index 842c7e312..8b43a787e 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -126,7 +126,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo // generate random network ID var i int - for i = 0; i < 1000; i++ { + for i = range 1000 { id := stringid.GenerateNonCryptoID() if _, err := n.getNetwork(id); err != nil { newNetwork.ID = id diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_cni.go b/vendor/github.com/containers/common/libnetwork/network/interface_cni.go index 0a8bc4472..3de3a3045 100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface_cni.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface_cni.go @@ -1,6 +1,4 @@ //go:build (linux || freebsd) && cni -// +build linux freebsd -// +build cni package network diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go b/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go index 2f4bb8371..d0645dc50 
100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go @@ -1,6 +1,4 @@ //go:build (linux || freebsd) && !cni -// +build linux freebsd -// +build !cni package network diff --git a/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go b/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go index fd68f87f2..2ee6369ef 100644 --- a/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go +++ b/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go @@ -26,11 +26,16 @@ import ( ) const ( - dnsForwardOpt = "--dns-forward" + dnsForwardOpt = "--dns-forward" + mapGuestAddrOpt = "--map-guest-addr" // dnsForwardIpv4 static ip used as nameserver address inside the netns, // given this is a "link local" ip it should be very unlikely that it causes conflicts - dnsForwardIpv4 = "169.254.0.1" + dnsForwardIpv4 = "169.254.1.1" + + // mapGuestAddrIpv4 static ip used as forwarder address inside the netns to reach the host, + // given this is a "link local" ip it should be very unlikely that it causes conflicts + mapGuestAddrIpv4 = "169.254.1.2" ) type SetupOptions struct { @@ -45,45 +50,56 @@ type SetupOptions struct { ExtraOptions []string } -func Setup(opts *SetupOptions) error { - _, err := Setup2(opts) - return err -} - -// Setup2 start the pasta process for the given netns. +// Setup starts the pasta process for the given netns. // The pasta binary is looked up in the HelperBinariesDir and $PATH. // Note that there is no need for any special cleanup logic, the pasta // process will automatically exit when the netns path is deleted. -func Setup2(opts *SetupOptions) (*SetupResult, error) { +func Setup(opts *SetupOptions) (*SetupResult, error) { path, err := opts.Config.FindHelperBinary(BinaryName, true) if err != nil { return nil, fmt.Errorf("could not find pasta, the network namespace can't be configured: %w", err) } - cmdArgs, dnsForwardIPs, err := createPastaArgs(opts) + cmdArgs, dnsForwardIPs, mapGuestAddrIPs, err := createPastaArgs(opts) if err != nil { return nil, err } logrus.Debugf("pasta arguments: %s", strings.Join(cmdArgs, " ")) - // pasta forks once ready, and quits once we delete the target namespace - out, err := exec.Command(path, cmdArgs...).CombinedOutput() - if err != nil { - exitErr := &exec.ExitError{} - if errors.As(err, &exitErr) { - return nil, fmt.Errorf("pasta failed with exit code %d:\n%s", - exitErr.ExitCode(), string(out)) + for { + // pasta forks once ready, and quits once we delete the target namespace + out, err := exec.Command(path, cmdArgs...).CombinedOutput() + if err != nil { + exitErr := &exec.ExitError{} + if errors.As(err, &exitErr) { + // special backwards compat check, --map-guest-addr was added in pasta version 20240814 so we + // cannot hard require it yet. Once we are confident that the update is in most distros we can remove it. + if exitErr.ExitCode() == 1 && + strings.Contains(string(out), "unrecognized option '"+mapGuestAddrOpt) && + len(mapGuestAddrIPs) == 1 && mapGuestAddrIPs[0] == mapGuestAddrIpv4 { + // we did add the default --map-guest-addr option, if users set something different we want + // to get to the error below. We have to unset mapGuestAddrIPs here to avoid an infinite loop. + mapGuestAddrIPs = nil + // Trim off last two args which are --map-guest-addr 169.254.1.2.
+ cmdArgs = cmdArgs[:len(cmdArgs)-2] + continue + } + return nil, fmt.Errorf("pasta failed with exit code %d:\n%s", + exitErr.ExitCode(), string(out)) + } + return nil, fmt.Errorf("failed to start pasta: %w", err) } - return nil, fmt.Errorf("failed to start pasta: %w", err) - } - if len(out) > 0 { - // TODO: This should be warning but right now pasta still prints - // things with --quiet that we do not care about. - // For now info is fine and we can bump it up later, it is only a - // nice to have. - logrus.Infof("pasta logged warnings: %q", string(out)) + if len(out) > 0 { + // TODO: This should be warning but as of August 2024 pasta still prints + // things with --quiet that we do not care about. In podman CI I still see + // "Couldn't get any nameserver address" so until this is fixed we cannot + // enable it. For now info is fine and we can bump it up later, it is only a + // nice to have. + logrus.Infof("pasta logged warnings: %q", strings.TrimSpace(string(out))) + } + break } var ipv4, ipv6 bool @@ -112,19 +128,27 @@ func Setup2(opts *SetupOptions) (*SetupResult, error) { } result.IPv6 = ipv6 - for _, ip := range dnsForwardIPs { + result.DNSForwardIPs = filterIpFamily(dnsForwardIPs, ipv4, ipv6) + result.MapGuestAddrIPs = filterIpFamily(mapGuestAddrIPs, ipv4, ipv6) + + return result, nil +} + +func filterIpFamily(ips []string, ipv4, ipv6 bool) []string { + var result []string + for _, ip := range ips { ipp := net.ParseIP(ip) - // add the namesever ip only if the address family matches + // add the ip only if the address family matches if ipv4 && util.IsIPv4(ipp) || ipv6 && util.IsIPv6(ipp) { - result.DNSForwardIPs = append(result.DNSForwardIPs, ip) + result = append(result, ip) } } - - return result, nil + return result } -// createPastaArgs creates the pasta arguments, it returns the args to be passed to pasta(1) and as second arg the dns forward ips used. -func createPastaArgs(opts *SetupOptions) ([]string, []string, error) { +// createPastaArgs creates the pasta arguments, it returns the args to be passed to pasta(1), +// as second arg the dns forward ips used, and as third arg the map guest addr ips used.
+func createPastaArgs(opts *SetupOptions) ([]string, []string, []string, error) { noTCPInitPorts := true noUDPInitPorts := true noTCPNamespacePorts := true @@ -149,6 +173,7 @@ func createPastaArgs(opts *SetupOptions) ([]string, []string, error) { }) var dnsForwardIPs []string + var mapGuestAddrIPs []string for i, opt := range cmdArgs { switch opt { case "-t", "--tcp-ports": @@ -166,6 +191,10 @@ func createPastaArgs(opts *SetupOptions) ([]string, []string, error) { if len(cmdArgs) > i+1 { dnsForwardIPs = append(dnsForwardIPs, cmdArgs[i+1]) } + case mapGuestAddrOpt: + if len(cmdArgs) > i+1 { + mapGuestAddrIPs = append(mapGuestAddrIPs, cmdArgs[i+1]) + } } } @@ -186,7 +215,7 @@ func createPastaArgs(opts *SetupOptions) ([]string, []string, error) { noUDPInitPorts = false cmdArgs = append(cmdArgs, "-u") default: - return nil, nil, fmt.Errorf("can't forward protocol: %s", protocol) + return nil, nil, nil, fmt.Errorf("can't forward protocol: %s", protocol) } arg := fmt.Sprintf("%s%d-%d:%d-%d", addr, @@ -226,5 +255,13 @@ func createPastaArgs(opts *SetupOptions) ([]string, []string, error) { cmdArgs = append(cmdArgs, "--netns", opts.Netns) - return cmdArgs, dnsForwardIPs, nil + // add these as the last args so we can easily trim them off in the error case when we have an older version + if len(mapGuestAddrIPs) == 0 { + // the user did not request custom --map-guest-addr so add our own so that we can use this + // for our own host.containers.internal host entry. + cmdArgs = append(cmdArgs, mapGuestAddrOpt, mapGuestAddrIpv4) + mapGuestAddrIPs = append(mapGuestAddrIPs, mapGuestAddrIpv4) + } + + return cmdArgs, dnsForwardIPs, mapGuestAddrIPs, nil } diff --git a/vendor/github.com/containers/common/libnetwork/pasta/types.go b/vendor/github.com/containers/common/libnetwork/pasta/types.go index b601e5169..f570afc3e 100644 --- a/vendor/github.com/containers/common/libnetwork/pasta/types.go +++ b/vendor/github.com/containers/common/libnetwork/pasta/types.go @@ -10,6 +10,9 @@ type SetupResult struct { // DNSForwardIP is the ip used in --dns-forward, it should be added as first // entry to resolv.conf in the container. DNSForwardIPs []string + // MapGuestAddrIPs are the ips used for the --map-guest-addr option which + // we can use for the host.containers.internal entry. + MapGuestAddrIPs []string // IPv6 says whenever pasta run with ipv6 support IPv6 bool } diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go index 6b844373d..97cf48cfd 100644 --- a/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go +++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go @@ -32,11 +32,12 @@ var ( // ipLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` - localhostNSRegexp = regexp.Delayed(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`) - nsIPv6Regexp = regexp.Delayed(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) - nsRegexp = regexp.Delayed(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) - searchRegexp = regexp.Delayed(`^\s*search\s*(([^\s]+\s*)*)$`) - optionsRegexp = regexp.Delayed(`^\s*options\s*(([^\s]+\s*)*)$`) + localhostNSRegexp = regexp.Delayed(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`) + nsIPv6Regexp = regexp.Delayed(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) + nsIPv6LinkLocalRegexp = regexp.Delayed(`(?m)^nameserver\s+` + ipv6Address + `%.*\s*\n*`) + nsRegexp = regexp.Delayed(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) + searchRegexp = regexp.Delayed(`^\s*search\s*(([^\s]+\s*)*)$`) + optionsRegexp = regexp.Delayed(`^\s*options\s*(([^\s]+\s*)*)$`) ) // filterResolvDNS cleans up the config in resolvConf. It has two main jobs: @@ -54,6 +55,10 @@ func filterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) []b // if IPv6 is not enabled, also clean out any IPv6 address nameserver if !ipv6Enabled { cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) + } else { + // If ipv6 is enabled we still must remove any ipv6 link-local addresses as + // the zone will never match the interface name or index in the container. + cleanedResolvConf = nsIPv6LinkLocalRegexp.ReplaceAll(cleanedResolvConf, []byte{}) } // if the resulting resolvConf has no more nameservers defined, add appropriate // default DNS servers for IPv4 and (optionally) IPv6 diff --git a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go index 6f713431c..9721750ab 100644 --- a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go +++ b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go @@ -645,7 +645,7 @@ func setupRootlessPortMappingViaSlirp(ports []types.PortMapping, cmd *exec.Cmd, if hostIP == "" { hostIP = "0.0.0.0" } - for i := uint16(0); i < port.Range; i++ { + for i := range port.Range { if err := openSlirp4netnsPort(apiSocket, protocol, hostIP, port.HostPort+i, port.ContainerPort+i); err != nil { return err } diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go index 9741103f5..77c76bf78 100644 --- a/vendor/github.com/containers/common/libnetwork/types/network.go +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -342,6 +342,10 @@ type TeardownOptions struct { type RootlessNetnsInfo struct { // IPAddresses used in the netns, must not be used for host.containers.internal IPAddresses []net.IP + // DnsForwardIps are the ips used in resolv.conf + DnsForwardIps []string + // MapGuestIps should be used for the host.containers.internal entry when set + MapGuestIps []string } // FilterFunc can be passed to NetworkList to filter the networks.
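Taken together, the pasta, rootlessnetns, and types hunks above form one feature: the rootless netns now records which ips pasta used for --dns-forward and --map-guest-addr, and etchosts can be told to prefer the latter for host.containers.internal. Below is a minimal sketch of how a caller might wire the two new APIs together; the helper name hostContainersInternal and the sample ip are illustrative only, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/containers/common/libnetwork/etchosts"
	"github.com/containers/common/libnetwork/types"
	"github.com/containers/common/pkg/config"
)

// hostContainersInternal is a hypothetical helper: it feeds the cached
// rootless-netns info into the new options-struct lookup. PreferIP still
// loses to an explicit host_containers_internal_ip in containers.conf.
func hostContainersInternal(conf *config.Config, info *types.RootlessNetnsInfo) string {
	opts := etchosts.HostContainersInternalOptions{Conf: conf}
	if info != nil {
		// never hand out an ip that is already bound inside the netns
		opts.Exclude = info.IPAddresses
		if len(info.MapGuestIps) > 0 {
			// prefer pasta's --map-guest-addr ip; pasta maps traffic to it back to the host
			opts.PreferIP = info.MapGuestIps[0]
		}
	}
	return etchosts.GetHostContainersInternalIP(opts)
}

func main() {
	conf, err := config.Default()
	if err != nil {
		panic(err)
	}
	info := &types.RootlessNetnsInfo{MapGuestIps: []string{"169.254.1.2"}}
	fmt.Println(hostContainersInternal(conf, info))
}
```

With the default containers.conf this prints 169.254.1.2; setting host_containers_internal_ip = "none" suppresses the entry entirely, matching the priority order of the switch in ip.go above.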
diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go index 43fd2c1b5..371fbba9a 100644 --- a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go +++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go @@ -9,20 +9,13 @@ import ( "errors" "fmt" "slices" - "sort" "strings" "sync" - "github.com/syndtr/gocapability/capability" + "github.com/moby/sys/capability" ) var ( - // Used internally and populated during init(). - capabilityList []string - - // Used internally and populated during init(). - capsList []capability.Cap - // ErrUnknownCapability is thrown when an unknown capability is processed. ErrUnknownCapability = errors.New("unknown capability") @@ -35,67 +28,67 @@ var ( // Useful on the CLI for `--cap-add=all` etc. const All = "ALL" -func getCapName(c capability.Cap) string { +func capName(c capability.Cap) string { return "CAP_" + strings.ToUpper(c.String()) } -func init() { - last := capability.CAP_LAST_CAP - // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND +// capStrList returns all capabilities supported by the currently running kernel, +// or an error if the list can not be obtained. +var capStrList = sync.OnceValues(func() ([]string, error) { + list, err := capability.ListSupported() + if err != nil { + return nil, err } - for _, cap := range capability.List() { - if cap > last { - continue - } - capsList = append(capsList, cap) - capabilityList = append(capabilityList, getCapName(cap)) - sort.Strings(capabilityList) + caps := make([]string, len(list)) + for i, c := range list { + caps[i] = capName(c) } -} - -var ( - boundingSetOnce sync.Once - boundingSetRet []string - boundingSetErr error -) + slices.Sort(caps) + return caps, nil +}) -// BoundingSet returns the capabilities in the current bounding set +// BoundingSet returns the capabilities in the current bounding set. func BoundingSet() ([]string, error) { - boundingSetOnce.Do(func() { - currentCaps, err := capability.NewPid2(0) - if err != nil { - boundingSetErr = err - return - } - err = currentCaps.Load() - if err != nil { - boundingSetErr = err - return - } - var r []string - for _, c := range capsList { - if !currentCaps.Get(capability.BOUNDING, c) { - continue - } - r = append(r, getCapName(c)) - } - boundingSetRet = r - sort.Strings(boundingSetRet) - boundingSetErr = err - }) - return boundingSetRet, boundingSetErr + return boundingSet() } -// AllCapabilities returns all known capabilities. +var boundingSet = sync.OnceValues(func() ([]string, error) { + currentCaps, err := capability.NewPid2(0) + if err != nil { + return nil, err + } + err = currentCaps.Load() + if err != nil { + return nil, err + } + list, err := capability.ListSupported() + if err != nil { + return nil, err + } + var r []string + for _, c := range list { + if !currentCaps.Get(capability.BOUNDING, c) { + continue + } + r = append(r, capName(c)) + } + slices.Sort(r) + return r, nil +}) + +// AllCapabilities returns all capabilities supported by the running kernel. func AllCapabilities() []string { - return capabilityList + list, _ := capStrList() + return list } // NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet // present). 
func NormalizeCapabilities(caps []string) ([]string, error) { + all, err := capStrList() + if err != nil { + return nil, err + } normalized := make([]string, 0, len(caps)) for _, c := range caps { c = strings.ToUpper(c) @@ -106,19 +99,23 @@ func NormalizeCapabilities(caps []string) ([]string, error) { if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } - if !slices.Contains(capabilityList, c) { + if !slices.Contains(all, c) { return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability) } normalized = append(normalized, c) } - sort.Strings(normalized) + slices.Sort(normalized) return normalized, nil } // ValidateCapabilities validates if caps only contains valid capabilities. func ValidateCapabilities(caps []string) error { + all, err := capStrList() + if err != nil { + return err + } for _, c := range caps { - if !slices.Contains(capabilityList, c) { + if !slices.Contains(all, c) { return fmt.Errorf("%q: %w", c, ErrUnknownCapability) } } @@ -155,7 +152,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { return nil, errors.New("adding all caps and removing all caps not allowed") } // "Drop" all capabilities; return what's in capAdd instead - sort.Strings(capAdd) + slices.Sort(capAdd) return capAdd, nil } @@ -195,6 +192,6 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { } caps = append(caps, cap) } - sort.Strings(caps) + slices.Sort(caps) return caps, nil } diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index 717685789..d2c610fb1 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go @@ -95,7 +95,8 @@ func init() { func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) { if cgroup2 { controllers := []controller{} - controllersFile := cgroupRoot + "/cgroup.controllers" + controllersFile := filepath.Join(cgroupRoot, "cgroup.controllers") + // rootless cgroupv2: check available controllers for current user, systemd or servicescope will inherit if unshare.IsRootless() { userSlice, err := getCgroupPathForCurrentProcess() @@ -104,7 +105,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) } // userSlice already contains '/' so not adding here basePath := cgroupRoot + userSlice - controllersFile = basePath + "/cgroup.controllers" + controllersFile = filepath.Join(basePath, "cgroup.controllers") } controllersFileBytes, err := os.ReadFile(controllersFile) if err != nil { @@ -597,7 +598,7 @@ func createCgroupv2Path(path string) (deferredError error) { if !strings.HasPrefix(path, cgroupRoot+"/") { return fmt.Errorf("invalid cgroup path %s", path) } - content, err := os.ReadFile(cgroupRoot + "/cgroup.controllers") + content, err := os.ReadFile(filepath.Join(cgroupRoot, "cgroup.controllers")) if err != nil { return err } @@ -625,8 +626,43 @@ func createCgroupv2Path(path string) (deferredError error) { // We enable the controllers for all the path components except the last one. It is not allowed to add // PIDs if there are already enabled controllers. 
if i < len(elements[3:])-1 { - if err := os.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil { - return err + subtreeControl := filepath.Join(current, "cgroup.subtree_control") + if err := os.WriteFile(subtreeControl, res, 0o755); err != nil { + // The kernel returns ENOENT either if the file itself is missing, or a controller is not available. + if errors.Is(err, os.ErrNotExist) { + if err2 := fileutils.Exists(subtreeControl); err2 != nil { + // If the file itself is missing, return the original error. + return err + } + repeatAttempts := 1000 + for repeatAttempts > 0 { + // store the controllers that failed to be enabled, so we can retry them + newCtrs := [][]byte{} + for _, ctr := range ctrs { + // Try to enable each controller individually, at least we can give a better error message if any fails. + if err := os.WriteFile(subtreeControl, []byte(fmt.Sprintf("+%s\n", ctr)), 0o755); err != nil { + // The kernel can return EBUSY when a process was moved to a sub-cgroup + // and the controllers are enabled in its parent cgroup. Retry a few times when + // it happens. + if errors.Is(err, unix.EBUSY) { + newCtrs = append(newCtrs, ctr) + } else { + return fmt.Errorf("enabling controller %s: %w", ctr, err) + } + } + } + if len(newCtrs) == 0 { + err = nil + break + } + ctrs = newCtrs + repeatAttempts-- + time.Sleep(time.Millisecond) + } + if err != nil { + return err + } + } } } } @@ -786,6 +822,12 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) { continue } + // If we are on a cgroup v2 system and there are cgroup v1 controllers + // mounted, ignore them when the current process is at the root cgroup. + if cgroup2 && parts[1] != "" && parts[2] == "/" { + continue + } + var cgroupPath string if cgroup2 { diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go index ead630e75..b723b34cc 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go @@ -4,8 +4,12 @@ package cgroups import ( "context" + "errors" "fmt" + "math/big" "path/filepath" + "slices" + "strconv" "strings" systemdDbus "github.com/coreos/go-systemd/v22/dbus" @@ -23,7 +27,7 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con slice = strings.TrimSuffix(slice, "/") var lastError error - for i := 0; i < 2; i++ { + for i := range 2 { properties := []systemdDbus.Property{ systemdDbus.PropDescription("cgroup " + name), systemdDbus.PropWants(slice), @@ -53,7 +57,11 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con properties = append(properties, p) } - uMap, sMap, bMap, iMap, structMap := resourcesToProps(resources, v2) + uMap, sMap, bMap, iMap, structMap, err := resourcesToProps(resources, v2) + if err != nil { + lastError = err + continue + } for k, v := range uMap { p := systemdDbus.Property{ Name: k, @@ -95,7 +103,7 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con } ch := make(chan string) - _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) + _, err = c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) if err != nil { lastError = err continue @@ -142,7 +150,7 @@ func systemdDestroyConn(path string, c *systemdDbus.Conn) error { return nil } -func resourcesToProps(res *configs.Resources, v2 bool) (map[string]uint64, map[string]string, map[string][]byte, map[string]int64,
map[string][]BlkioDev) { +func resourcesToProps(res *configs.Resources, v2 bool) (map[string]uint64, map[string]string, map[string][]byte, map[string]int64, map[string][]BlkioDev, error) { bMap := make(map[string][]byte) // this array is not used but will be once more resource limits are added sMap := make(map[string]string) @@ -179,11 +187,19 @@ func resourcesToProps(res *configs.Resources, v2 bool) (map[string]uint64, map[s // CPUSet if res.CpusetCpus != "" { - bits := []byte(res.CpusetCpus) + bits, err := rangeToBits(res.CpusetCpus) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("resources.CpusetCpus=%q conversion error: %w", + res.CpusetCpus, err) + } bMap["AllowedCPUs"] = bits } if res.CpusetMems != "" { - bits := []byte(res.CpusetMems) + bits, err := rangeToBits(res.CpusetMems) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("resources.CpusetMems=%q conversion error: %w", + res.CpusetMems, err) + } bMap["AllowedMemoryNodes"] = bits } @@ -258,5 +274,51 @@ func resourcesToProps(res *configs.Resources, v2 bool) (map[string]uint64, map[s } } - return uMap, sMap, bMap, iMap, structMap + return uMap, sMap, bMap, iMap, structMap, nil +} + +func rangeToBits(str string) ([]byte, error) { + bits := new(big.Int) + + for _, r := range strings.Split(str, ",") { + // allow extra spaces around + r = strings.TrimSpace(r) + // allow empty elements (extra commas) + if r == "" { + continue + } + startr, endr, ok := strings.Cut(r, "-") + if ok { + start, err := strconv.ParseUint(startr, 10, 32) + if err != nil { + return nil, err + } + end, err := strconv.ParseUint(endr, 10, 32) + if err != nil { + return nil, err + } + if start > end { + return nil, errors.New("invalid range: " + r) + } + for i := start; i <= end; i++ { + bits.SetBit(bits, int(i), 1) + } + } else { + val, err := strconv.ParseUint(startr, 10, 32) + if err != nil { + return nil, err + } + bits.SetBit(bits, int(val), 1) + } + } + + ret := bits.Bytes() + if len(ret) == 0 { + // do not allow empty values + return nil, errors.New("empty value") + } + + // fit cpuset parsing order in systemd + slices.Reverse(ret) + return ret, nil } diff --git a/vendor/github.com/containers/common/pkg/config/config_bsd.go b/vendor/github.com/containers/common/pkg/config/config_bsd.go index e378a0722..9f3513a67 100644 --- a/vendor/github.com/containers/common/pkg/config/config_bsd.go +++ b/vendor/github.com/containers/common/pkg/config/config_bsd.go @@ -1,4 +1,4 @@ -//go:build (freebsd || netbsd || openbsd) +//go:build freebsd || netbsd || openbsd package config diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index c00efecbb..236b51204 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -27,16 +27,19 @@ # #apparmor_profile = "container-default" -# The hosts entries from the base hosts file are added to the containers hosts -# file. This must be either an absolute path or as special values "image" which -# uses the hosts file from the container image or "none" which means -# no base hosts file is used. The default is "" which will use /etc/hosts. +# Base file to create the `/etc/hosts` file inside the container. This must either +# be an absolute path to a file on the host system, or one of the following +# special flags: +# "" Use the host's `/etc/hosts` file (the default) +# `none` Do not use a base file (i.e. 
start with an empty file) +# `image` Use the container image's `/etc/hosts` file as base file # #base_hosts_file = "" # List of cgroup_conf entries specifying a list of cgroup files to write to and # their values. For example `memory.high=1073741824` sets the # memory.high limit to 1GB. + # # cgroup_conf = [] # Default way to create a cgroup namespace for the container @@ -126,13 +129,25 @@ default_sysctls = [ # #env_host = false -# Set the ip for the host.containers.internal entry in the containers /etc/hosts -# file. This can be set to "none" to disable adding this entry. By default it -# will automatically choose the host ip. -# -# NOTE: When using podman machine this entry will never be added to the containers -# hosts file instead the gvproxy dns resolver will resolve this hostname. Therefore -# it is not possible to disable the entry in this case. +# Set the IP address the container should expect to connect to the host. The IP +# address is used by Podman to automatically add the `host.containers.internal` +# and `host.docker.internal` hostnames to the container's `/etc/hosts` file. It +# is also used for the *host-gateway* flag of Podman's `--add-host` CLI option. +# If no IP address is configured (the default), Podman will try to determine it +# automatically, but might fail to do so depending on the container's network +# setup. Adding these internal hostnames to `/etc/hosts` is silently skipped then. +# Set this config to `none` to never add the internal hostnames to `/etc/hosts`. +# +# Note: If Podman is running in a virtual machine using `podman machine` (this +# includes Mac and Windows hosts), Podman will silently skip adding the internal +# hostnames to `/etc/hosts`, unless an IP address was configured manually. The +# internal hostnames are resolved by the gvproxy DNS resolver instead. This config +# has no effect on gvproxy. However, since `/etc/hosts` bypasses the DNS resolver, +# a manually configured IP address still takes precedence. +# +# Note: This config doesn't affect the actual network setup, it just tells Podman +# the IP address it should expect. Configuring an IP address here doesn't ensure +# that the container can actually reach the host using this IP address. # #host_containers_internal_ip = "" @@ -221,8 +236,10 @@ default_sysctls = [ # #netns = "private" -# Create /etc/hosts for the container. By default, container engine manage -# /etc/hosts, automatically adding the container's own IP address. +# Do not modify the `/etc/hosts` file in the container. Podman assumes control +# over the container's `/etc/hosts` file by default; refer to the `--add-host` +# CLI option for details. To disable this, either set this config to `true`, or +# use the functionally identical `--no-hosts` CLI option. # #no_hosts = false @@ -416,6 +433,8 @@ default_sysctls = [ #List of compression algorithms. If set makes sure that requested compression variant #for each platform is added to the manifest list keeping original instance intact in #the same manifest list on every `manifest push`. Supported values are (`gzip`, `zstd` and `zstd:chunked`). +#`zstd:chunked` is incompatible with encrypting images, and will be treated as `zstd` with a warning +#in that case. # #add_compression = ["gzip", "zstd", "zstd:chunked"] @@ -438,6 +457,8 @@ default_sysctls = [ # This field is ignored when pushing images to the docker-daemon and # docker-archive formats. It is also ignored when the manifest format is set # to v2s2.
+# `zstd:chunked` is incompatible with encrypting images, and will be treated as `zstd` with a warning +# in that case. # #compression_format = "gzip" @@ -865,7 +886,15 @@ default_sysctls = [ # Virtualization provider used to run Podman machine. # If it is empty or commented out, the default provider will be used. -# +# Linux: +# qemu - Open source machine emulator and virtualizer. (Default) +# Windows: there are currently two options: +# wsl - Windows Subsystem for Linux (Default) +# hyperv - Windows Server Virtualization +# Mac: there are currently two options: +# applehv - Default Apple Hypervisor (Default) +# libkrun - Launch virtual machines using the libkrun platform, optimized +# for sharing GPU with the machine. #provider = "" # Rosetta supports running x86_64 Linux binaries on a Podman machine on Apple silicon. diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 21753f4f2..894153ed3 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -326,6 +326,8 @@ default_sysctls = [ # The compression format to use when pushing an image. # Valid options are: `gzip`, `zstd` and `zstd:chunked`. +# `zstd:chunked` is incompatible with encrypting images, and will be treated as `zstd` with a warning +# in that case. # #compression_format = "gzip" diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 311b090b9..02ff1284c 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -120,12 +120,12 @@ var ( additionalHelperBinariesDir string defaultUnixComposeProviders = []string{ - "docker-compose", "$HOME/.docker/cli-plugins/docker-compose", "/usr/local/lib/docker/cli-plugins/docker-compose", "/usr/local/libexec/docker/cli-plugins/docker-compose", "/usr/lib/docker/cli-plugins/docker-compose", "/usr/libexec/docker/cli-plugins/docker-compose", + "docker-compose", "podman-compose", } @@ -231,7 +231,6 @@ func defaultConfig() (*Config, error) { DNSServers: attributedstring.Slice{}, DefaultCapabilities: attributedstring.NewSlice(DefaultCapabilities), DefaultSysctls: attributedstring.Slice{}, - DefaultUlimits: attributedstring.NewSlice(getDefaultProcessLimits()), Devices: attributedstring.Slice{}, EnableKeyring: true, EnableLabeling: selinuxEnabled(), diff --git a/vendor/github.com/containers/common/pkg/config/default_bsd.go b/vendor/github.com/containers/common/pkg/config/default_bsd.go index c619e1783..2e87dc8b0 100644 --- a/vendor/github.com/containers/common/pkg/config/default_bsd.go +++ b/vendor/github.com/containers/common/pkg/config/default_bsd.go @@ -1,4 +1,4 @@ -//go:build (freebsd || netbsd || openbsd) +//go:build freebsd || netbsd || openbsd package config diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go index 9e2ae4796..928ae9fa2 100644 --- a/vendor/github.com/containers/common/pkg/config/default_linux.go +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -1,47 +1,13 @@ package config import ( - "fmt" "os" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -const ( - oldMaxSize = uint64(1048576) ) func getDefaultCgroupsMode() string { return "enabled" } -// getDefaultProcessLimits returns the nproc 
for the current process in ulimits format -// Note that nfile sometimes cannot be set to unlimited, and the limit is hardcoded -// to (oldMaxSize) 1048576 (2^20), see: http://stackoverflow.com/a/1213069/1811501 -// In rootless containers this will fail, and the process will just use its current limits -func getDefaultProcessLimits() []string { - rlim := unix.Rlimit{Cur: oldMaxSize, Max: oldMaxSize} - oldrlim := rlim - // Attempt to set file limit and process limit to pid_max in OS - dat, err := os.ReadFile("/proc/sys/kernel/pid_max") - if err == nil { - val := strings.TrimSuffix(string(dat), "\n") - max, err := strconv.ParseUint(val, 10, 64) - if err == nil { - rlim = unix.Rlimit{Cur: max, Max: max} - } - } - defaultLimits := []string{} - if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { - defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max)) - } else if err := unix.Setrlimit(unix.RLIMIT_NPROC, &oldrlim); err == nil { - defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", oldrlim.Cur, oldrlim.Max)) - } - return defaultLimits -} - // getDefaultTmpDir for linux func getDefaultTmpDir() string { // first check the TMPDIR env var diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go index 70627cbda..610aa122b 100644 --- a/vendor/github.com/containers/common/pkg/config/default_windows.go +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -42,11 +42,17 @@ func getLibpodTmpDir() string { } // getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) +// It is executed only if the machine provider is Hyper-V and it mimics WSL +// behavior where the host %USERPROFILE% drive (e.g. C:\) is automatically +// mounted in the guest under /mnt/ (e.g. 
/mnt/c/) func getDefaultMachineVolumes() []string { hd := homedir.Get() vol := filepath.VolumeName(hd) hostMnt := filepath.ToSlash(strings.TrimPrefix(hd, vol)) - return []string{fmt.Sprintf("%s:%s", hd, hostMnt)} + return []string{ + fmt.Sprintf("%s:%s", hd, hostMnt), + fmt.Sprintf("%s:%s", vol+"\\", "/mnt/"+strings.ToLower(vol[0:1])), + } } func getDefaultComposeProviders() []string { diff --git a/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go b/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go index ac17ac64d..302711260 100644 --- a/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go +++ b/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go @@ -56,7 +56,6 @@ func RuntimeConfigFilterWithOptions(ctx context.Context, options RuntimeConfigFi return nil, err } for i, hook := range options.Hooks { - hook := hook var stdout bytes.Buffer hookErr, err = RunWithOptions(ctx, RunOptions{Hook: &hook, Dir: options.Dir, State: data, Stdout: &stdout, PostKillTimeout: options.PostKillTimeout}) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/netns/netns_linux.go b/vendor/github.com/containers/common/pkg/netns/netns_linux.go index 3a3372889..0947b9f70 100644 --- a/vendor/github.com/containers/common/pkg/netns/netns_linux.go +++ b/vendor/github.com/containers/common/pkg/netns/netns_linux.go @@ -75,7 +75,7 @@ func NewNS() (ns.NetNS, error) { return nil, err } - for i := 0; i < 10000; i++ { + for range 10000 { nsName, err := getRandomNetnsName() if err != nil { return nil, err @@ -108,7 +108,7 @@ func NewNSFrom(fromNetns string) (string, error) { return "", err } - for i := 0; i < 10000; i++ { + for range 10000 { nsName, err := getRandomNetnsName() if err != nil { return "", err @@ -260,34 +260,37 @@ func newNSPath(nsPath string) (ns.NetNS, error) { // UnmountNS unmounts the given netns path func UnmountNS(nsPath string) error { // Only unmount if it's been bind-mounted (don't touch namespaces in /proc...) - if !strings.HasPrefix(nsPath, "/proc/") { - // EINVAL means the path exists but is not mounted, just try to remove the path below - if err := unix.Unmount(nsPath, unix.MNT_DETACH); err != nil && !errors.Is(err, unix.EINVAL) { - // If path does not exists we can return without error as we have nothing to do. - if errors.Is(err, unix.ENOENT) { - return nil - } - - return fmt.Errorf("failed to unmount NS: at %s: %w", nsPath, err) + if strings.HasPrefix(nsPath, "/proc/") { + return nil + } + // EINVAL means the path exists but is not mounted, just try to remove the path below + if err := unix.Unmount(nsPath, unix.MNT_DETACH); err != nil && !errors.Is(err, unix.EINVAL) { + // If path does not exist we can return without error as we have nothing to do. + if errors.Is(err, unix.ENOENT) { + return nil } - for { - if err := os.Remove(nsPath); err != nil { - if errors.Is(err, unix.EBUSY) { - // mount is still busy, sleep a moment and try again to remove - logrus.Debugf("Netns %s still busy, try removing it again in 10ms", nsPath) - time.Sleep(10 * time.Millisecond) - continue - } - // If path does not exists we can return without error.
- if errors.Is(err, unix.ENOENT) { - break - } - return fmt.Errorf("failed to remove ns path: %w", err) + return fmt.Errorf("failed to unmount NS: at %s: %w", nsPath, err) + } + + var err error + // wait for up to 60s in the loop + for range 6000 { + if err = os.Remove(nsPath); err != nil { + if errors.Is(err, unix.EBUSY) { + // mount is still busy, sleep a moment and try again to remove + logrus.Debugf("Netns %s still busy, try removing it again in 10ms", nsPath) + time.Sleep(10 * time.Millisecond) + continue } - break + // If path does not exist we can return without error. + if errors.Is(err, unix.ENOENT) { + return nil + } + return fmt.Errorf("failed to remove ns path: %w", err) } + return nil } - return nil + return fmt.Errorf("failed to remove ns path (timeout after 60s): %w", err) } diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go index 5c4cfbe84..c2b1265c3 100644 --- a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go +++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go @@ -16,14 +16,20 @@ import ( "strings" "time" + // We are using skeema/knownhosts rather than + // golang.org/x/crypto/ssh/knownhosts because the + // latter has an issue when the first key returned + // by the server doesn't match the one in known_hosts: + // https://github.com/golang/go/issues/29286 + // https://github.com/containers/podman/issues/23575 "github.com/containers/common/pkg/config" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/pkg/sftp" "github.com/sirupsen/logrus" + "github.com/skeema/knownhosts" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" - "golang.org/x/crypto/ssh/knownhosts" "golang.org/x/exp/maps" ) @@ -301,46 +307,44 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection return nil, err } + keyFilePath := filepath.Join(homedir.Get(), ".ssh", "known_hosts") + known, err := knownhosts.NewDB(keyFilePath) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + keyDir := path.Dir(keyFilePath) + if err := fileutils.Exists(keyDir); errors.Is(err, os.ErrNotExist) { + if err := os.Mkdir(keyDir, 0o700); err != nil { + return nil, err + } + } + k, err := os.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE, 0o600) + if err != nil { + return nil, err + } + k.Close() + known, err = knownhosts.NewDB(keyFilePath) + if err != nil { + return nil, err + } + } + var callback ssh.HostKeyCallback if insecureIsMachineConnection { callback = ssh.InsecureIgnoreHostKey() } else { callback = ssh.HostKeyCallback(func(host string, remote net.Addr, pubKey ssh.PublicKey) error { - keyFilePath := filepath.Join(homedir.Get(), ".ssh", "known_hosts") - known, err := knownhosts.New(keyFilePath) - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - keyDir := path.Dir(keyFilePath) - if err := fileutils.Exists(keyDir); errors.Is(err, os.ErrNotExist) { - if err := os.Mkdir(keyDir, 0o700); err != nil { - return err - } - } - k, err := os.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE, 0o600) - if err != nil { - return err - } - k.Close() - known, err = knownhosts.New(keyFilePath) - if err != nil { - return err - } - } // we need to check if there is an error from reading known hosts for this public key and if there is an error, what is it, and why is it happening?
// if it is a key mismatch we want to error since we know the host using another key // however, if it is a general error not because of a known key, we want to add our key to the known_hosts file - hErr := known(host, remote, pubKey) - var keyErr *knownhosts.KeyError - // if keyErr.Want is not empty, we are receiving a different key meaning the host is known but we are using the wrong key - as := errors.As(hErr, &keyErr) + hErr := known.HostKeyCallback()(host, remote, pubKey) switch { - case as && len(keyErr.Want) > 0: + case knownhosts.IsHostKeyChanged(hErr): logrus.Warnf("ssh host key mismatch for host %s, got key %s of type %s", host, ssh.FingerprintSHA256(pubKey), pubKey.Type()) - return keyErr + return hErr // if keyErr.Want is empty that just means we do not know this host yet, add it. - case as && len(keyErr.Want) == 0: + case knownhosts.IsHostUnknown(hErr): // write to known_hosts err := addKnownHostsEntry(host, pubKey) if err != nil { @@ -358,10 +362,11 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection } cfg := &ssh.ClientConfig{ - User: uri.User.Username(), - Auth: authMethods, - HostKeyCallback: callback, - Timeout: tick, + User: uri.User.Username(), + Auth: authMethods, + HostKeyCallback: callback, + Timeout: tick, + HostKeyAlgorithms: known.HostKeyAlgorithms(uri.Host), } return cfg, nil } diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index a6538ffb9..2c78cf1e6 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -155,6 +155,25 @@ func getMountsMap(path string) (string, string, error) { //nolint return "", "", fmt.Errorf("unable to get host and container dir from path: %s", path) } +// Return true iff the system is in FIPS mode as determined by reading +// /proc/sys/crypto/fips_enabled. +func shouldAddFIPSMounts() bool { + fips_enabled, err := os.ReadFile("/proc/sys/crypto/fips_enabled") + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logrus.Errorf("Failed to read /proc/sys/crypto/fips_enabled to determine FIPS state: %v", err) + } + return false + } + + if strings.TrimSpace(string(fips_enabled)) != "1" { + logrus.Debug("/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts") + return false + } + + return true +} + // MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem // mountLabel: MAC/SELinux label for container content // containerRunDir: Private data for storing subscriptions on the host mounted in container. 
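The subscriptions.go hunk above replaces the old stat of /etc/system-fips on the host with a read of the kernel's own FIPS flag. Distilled to a standalone sketch (the fipsEnabled name is mine; the logic mirrors the new shouldAddFIPSMounts helper):

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"strings"
)

// fipsEnabled reports whether the running kernel is in FIPS mode.
// A missing /proc file just means a kernel without the FIPS machinery,
// so it is treated as "not enabled" rather than as an error.
func fipsEnabled() bool {
	data, err := os.ReadFile("/proc/sys/crypto/fips_enabled")
	if err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			fmt.Fprintf(os.Stderr, "reading fips_enabled: %v\n", err)
		}
		return false
	}
	return strings.TrimSpace(string(data)) == "1"
}

func main() {
	fmt.Println("host in FIPS mode:", fipsEnabled())
}
```

The design point of the change: /etc/system-fips is deprecated and may be absent even on FIPS-enabled hosts, while the kernel flag is authoritative, so the bind mounts are now gated on the latter.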
@@ -194,22 +213,16 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, } } - // Only add FIPS subscription mount if disableFips=false - if disableFips { + // Only add FIPS subscription mount if disableFips is false and + // /proc/sys/crypto/fips_enabled contains "1" + if disableFips || !shouldAddFIPSMounts() { return subscriptionMounts } - // Add FIPS mode subscription if /etc/system-fips exists on the host - err := fileutils.Exists("/etc/system-fips") - switch { - case err == nil: - if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil { - logrus.Errorf("Adding FIPS mode subscription to container: %v", err) - } - case errors.Is(err, os.ErrNotExist): - logrus.Debug("/etc/system-fips does not exist on host, not mounting FIPS mode subscription") - default: - logrus.Errorf("stat /etc/system-fips failed for FIPS mode subscription: %v", err) + + if err := addFIPSMounts(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil { + logrus.Errorf("Adding FIPS mode bind mounts to container: %v", err) } + return subscriptionMounts } @@ -306,43 +319,97 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string return mounts, nil } -// addFIPSModeSubscription adds mounts to the `mounts` slice that are needed for the container to run openssl in FIPs mode -// (i.e: be FIPs compliant). -// It should only be called if /etc/system-fips exists on host. -// It primarily does two things: -// - creates /run/secrets/system-fips in the container root filesystem, and adds it to the `mounts` slice. -// - If `/etc/crypto-policies/back-ends` already exists inside of the container, it creates -// `/usr/share/crypto-policies/back-ends/FIPS` inside the container as well. -// It is done from within the container to ensure to avoid policy incompatibility between the container and host. -func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error { +func containerHasEtcSystemFips(subscriptionsDir, mountPoint string) (bool, error) { + containerEtc, err := securejoin.SecureJoin(mountPoint, "etc") + if err != nil { + return false, fmt.Errorf("Container /etc resolution error: %w", err) + } + if fileutils.Lexists(filepath.Join(containerEtc, "system-fips")) != nil { + logrus.Debug("/etc/system-fips does not exist in the container, not creating /run/secrets/system-fips") + return false, nil + } + + fipsFileTarget, err := securejoin.SecureJoin(mountPoint, "etc/system-fips") + if err != nil { + return false, fmt.Errorf("Container /etc/system-fips resolution error: %w", err) + } + if fipsFileTarget != filepath.Join(mountPoint, subscriptionsDir, "system-fips") { + logrus.Warnf("/etc/system-fips exists in the container, but is not a symlink to %[1]v/system-fips; not creating %[1]v/system-fips", subscriptionsDir) + return false, nil + } + + return true, nil +} + +// addFIPSMounts adds mounts to the `mounts` slice that are needed +// for the container to run cryptographic libraries (openssl, gnutls, NSS, ...) +// in FIPS mode (i.e: be FIPS compliant). +// It should only be called if /proc/sys/crypto/fips_enabled on the host +// contains '1'. +// It does three things: +// - creates /run/secrets/system-fips in the container root filesystem if +// /etc/system-fips exists and is a symlink to /run/secrets/system-fips, +// and adds it to the `mounts` slice. 
This is, for example, the case on +// RHEL 8, but not on newer RHEL, since /etc/system-fips is deprecated. +// - Bind-mounts `/usr/share/crypto-policies/back-ends/FIPS` over +// `/etc/crypto-policies/back-ends` if the former exists inside of the +// container. This is done from within the container to avoid policy +// incompatibility between container and host. +// - If a bind mount for `/etc/crypto-policies/back-ends` was created, +// bind-mounts `/usr/share/crypto-policies/default-fips-config` over +// `/etc/crypto-policies/config` if the former exists inside of the +// container. If it does not exist, creates a new temporary file containing +// "FIPS\n", and bind-mounts that over `/etc/crypto-policies/config`. +// +// Starting in CentOS 10 Stream, the crypto-policies package gracefully recognizes the two bind mounts +// +// - /etc/crypto-policies/config -> /usr/share/crypto-policies/default-fips-config +// - /etc/crypto-policies/back-ends/FIPS -> /usr/share/crypto-policies/back-ends/FIPS +// +// and unmounts them when users manually change the policy, or removes and +// restores the mounts when the crypto-policies package is upgraded. +func addFIPSMounts(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error { + // Check whether $container/etc/system-fips exists and is a symlink to /run/secrets/system-fips subscriptionsDir := "/run/secrets" - ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir) - if err := fileutils.Exists(ctrDirOnHost); errors.Is(err, os.ErrNotExist) { - if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint - return err - } - if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil { - return fmt.Errorf("applying correct labels on %q: %w", ctrDirOnHost, err) - } + + createSystemFipsSecret, err := containerHasEtcSystemFips(subscriptionsDir, mountPoint) + if err != nil { + return err } - fipsFile := filepath.Join(ctrDirOnHost, "system-fips") - // In the event of restart, it is possible for the FIPS mode file to already exist - if err := fileutils.Exists(fipsFile); errors.Is(err, os.ErrNotExist) { - file, err := os.Create(fipsFile) - if err != nil { - return fmt.Errorf("creating system-fips file in container for FIPS mode: %w", err) + if createSystemFipsSecret { + // This container contains + // /etc/system-fips -> /run/secrets/system-fips + // and expects podman to create this file if the container should + // be in FIPS mode + ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir) + if err := fileutils.Exists(ctrDirOnHost); errors.Is(err, os.ErrNotExist) { + if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint + return err + } + if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil { + return fmt.Errorf("applying correct labels on %q: %w", ctrDirOnHost, err) + } } - file.Close() - } + fipsFile := filepath.Join(ctrDirOnHost, "system-fips") - if !mountExists(*mounts, subscriptionsDir) { - m := rspec.Mount{ - Source: ctrDirOnHost, - Destination: subscriptionsDir, - Type: "bind", - Options: []string{"bind", "rprivate"}, + // In the event of restart, it is possible for the FIPS mode file to already exist + if err := fileutils.Exists(fipsFile); errors.Is(err, os.ErrNotExist) { + file, err := os.Create(fipsFile) + if err != nil { + return fmt.Errorf("creating system-fips file in container for FIPS mode: %w", err) + } + file.Close() + } + + if !mountExists(*mounts, subscriptionsDir) { + m := rspec.Mount{ + Source: ctrDirOnHost, + 
Destination: subscriptionsDir, + Type: "bind", + Options: []string{"bind", "rprivate"}, + } + *mounts = append(*mounts, m) } - *mounts = append(*mounts, m) } srcBackendDir := "/usr/share/crypto-policies/back-ends/FIPS" @@ -370,27 +437,44 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, // Make sure we set the config to FIPS so that the container does not overwrite // /etc/crypto-policies/back-ends when crypto-policies-scripts is reinstalled. - cryptoPoliciesConfigFile := filepath.Join(containerRunDir, "fips-config") - file, err := os.Create(cryptoPoliciesConfigFile) + // + // Starting in CentOS 10 Stream, crypto-policies provides + // /usr/share/crypto-policies/default-fips-config as bind mount source + // file and the crypto-policies tooling gracefully deals with the two bind-mounts + // /etc/crypto-policies/back-ends -> /usr/share/crypto-policies/back-ends/FIPS + // /etc/crypto-policies/config -> /usr/share/crypto-policies/default-fips-config + // if they both exist. + srcPolicyConfig := "/usr/share/crypto-policies/default-fips-config" + destPolicyConfig := "/etc/crypto-policies/config" + srcPolicyConfigOnHost, err := securejoin.SecureJoin(mountPoint, srcPolicyConfig) if err != nil { - return fmt.Errorf("creating fips config file in container for FIPS mode: %w", err) - } - defer file.Close() - if _, err := file.WriteString("FIPS\n"); err != nil { - return fmt.Errorf("writing fips config file in container for FIPS mode: %w", err) - } - if err = label.Relabel(cryptoPoliciesConfigFile, mountLabel, false); err != nil { - return fmt.Errorf("applying correct labels on fips-config file: %w", err) + return fmt.Errorf("Could not expand %q in container: %w", srcPolicyConfig, err) } - if err := file.Chown(uid, gid); err != nil { - return fmt.Errorf("chown fips-config file: %w", err) + + if err = fileutils.Exists(srcPolicyConfigOnHost); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("Could not check whether %q exists in container: %w", srcPolicyConfig, err) + } + + // /usr/share/crypto-policies/default-fips-config does not exist, let's create it ourselves + cryptoPoliciesConfigFile := filepath.Join(containerRunDir, "fips-config") + if err := os.WriteFile(cryptoPoliciesConfigFile, []byte("FIPS\n"), 0o644); err != nil { + return fmt.Errorf("Failed to write fips config file in container for FIPS mode: %w", err) + } + if err = label.Relabel(cryptoPoliciesConfigFile, mountLabel, false); err != nil { + return fmt.Errorf("Failed to apply correct labels on fips config file: %w", err) + } + if err := os.Chown(cryptoPoliciesConfigFile, uid, gid); err != nil { + return fmt.Errorf("Failed to chown fips config file: %w", err) + } + + srcPolicyConfigOnHost = cryptoPoliciesConfigFile } - policyConfig := "/etc/crypto-policies/config" - if !mountExists(*mounts, policyConfig) { + if !mountExists(*mounts, destPolicyConfig) { m := rspec.Mount{ - Source: cryptoPoliciesConfigFile, - Destination: policyConfig, + Source: srcPolicyConfigOnHost, + Destination: destPolicyConfig, Type: "bind", Options: []string{"bind", "rprivate"}, } diff --git a/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go b/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go index c1d8ed72e..ea61ecaa1 100644 --- a/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go +++ b/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go @@ -74,7 +74,7 @@ func MoveRootlessNetnsSlirpProcessToUserSlice(pid int) error { func 
MovePauseProcessToScope(pausePidPath string) { var err error - for i := 0; i < 10; i++ { + for range 10 { randBytes := make([]byte, 4) _, err = rand.Read(randBytes) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/timezone/timezone.go b/vendor/github.com/containers/common/pkg/timezone/timezone.go index 1fcbd5c42..b82dc309d 100644 --- a/vendor/github.com/containers/common/pkg/timezone/timezone.go +++ b/vendor/github.com/containers/common/pkg/timezone/timezone.go @@ -23,18 +23,20 @@ func ConfigureContainerTimeZone(timezone, containerRunDir, mountPoint, etcPath, switch { case timezone == "": return "", nil - case os.Getenv("TZDIR") != "": - // Allow using TZDIR per: - // https://sourceware.org/git/?p=glibc.git;a=blob;f=time/tzfile.c;h=8a923d0cccc927a106dc3e3c641be310893bab4e;hb=HEAD#l149 - - timezonePath = filepath.Join(os.Getenv("TZDIR"), timezone) case timezone == "local": timezonePath, err = filepath.EvalSymlinks("/etc/localtime") if err != nil { return "", fmt.Errorf("finding local timezone for container %s: %w", containerID, err) } default: - timezonePath = filepath.Join("/usr/share/zoneinfo", timezone) + // Allow using TZDIR per: + // https://sourceware.org/git/?p=glibc.git;a=blob;f=time/tzfile.c;h=8a923d0cccc927a106dc3e3c641be310893bab4e;hb=HEAD#l149 + zoneinfo := os.Getenv("TZDIR") + if zoneinfo == "" { + // default zoneinfo location + zoneinfo = "/usr/share/zoneinfo" + } + timezonePath = filepath.Join(zoneinfo, timezone) } etcFd, err := openDirectory(etcPath) diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 8f30e4688..98eee47e1 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.60.4" +const Version = "0.61.0" diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 996a4e2d7..5b26abb09 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -137,6 +137,17 @@ type Options struct { // DestinationCtx.CompressionFormat is used exclusively, and blobs of other // compression algorithms are not reused. ForceCompressionFormat bool + + // ReportResolvedReference, if set, asks the destination transport to store + // a “resolved” (more detailed) reference to the created image + // into the value this option points to. + // What “resolved” means is transport-specific. + // Most transports don’t support this, and cause the value to be set to nil. + // + // For the containers-storage: transport, the reference contains an image ID, + // so that storage.ResolveReference returns exactly the created image. + // WARNING: It is unspecified whether the reference also contains a reference.Named element. + ReportResolvedReference *types.ImageReference } // OptionCompressionVariant allows to supply information about @@ -193,35 +204,33 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, reportWriter = options.ReportWriter } + // safeClose amends retErr with an error from c.Close(), if any. + safeClose := func(name string, c io.Closer) { + err := c.Close() + if err == nil { + return + } + // Do not use %w for err as we don't want it to be unwrapped by callers. 
+ if retErr != nil { + retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr) + } else { + retErr = fmt.Errorf(" (%s: %s)", name, err.Error()) + } + } + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) if err != nil { return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err) } dest := imagedestination.FromPublic(publicDest) - defer func() { - if err := dest.Close(); err != nil { - if retErr != nil { - retErr = fmt.Errorf(" (dest: %v): %w", err, retErr) - } else { - retErr = fmt.Errorf(" (dest: %v)", err) - } - } - }() + defer safeClose("dest", dest) publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) if err != nil { return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err) } rawSource := imagesource.FromPublic(publicRawSource) - defer func() { - if err := rawSource.Close(); err != nil { - if retErr != nil { - retErr = fmt.Errorf(" (src: %v): %w", err, retErr) - } else { - retErr = fmt.Errorf(" (src: %v)", err) - } - } - }() + defer safeClose("src", rawSource) // If reportWriter is not a TTY (e.g., when piping to a file), do not // print the progress bars to avoid long and hard to parse output. @@ -339,7 +348,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } } - if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil { + if options.ReportResolvedReference != nil { + *options.ReportResolvedReference = nil // The default outcome, if not specifically supported by the transport. + } + if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{ + UnparsedToplevel: c.unparsedToplevel, + ReportResolvedReference: options.ReportResolvedReference, + }); err != nil { return nil, fmt.Errorf("committing the finished image: %w", err) } diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 08128ce8d..59f41d216 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -24,13 +24,18 @@ func (c *copier) newProgressPool() *mpb.Progress { // customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar func customPartialBlobDecorFunc(s decor.Statistics) string { + current := decor.SizeB1024(s.Current) + total := decor.SizeB1024(s.Total) + refill := decor.SizeB1024(s.Refill) if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill) + } + // If we didn't do a partial fetch then let's not output a distracting ("skipped: 0.0b = 0.00%") + if s.Refill == 0 { + return fmt.Sprintf("%.1f / %.1f", current, total) } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) + return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage) } // progressBar wraps a *mpb.Bar, allowing us to add extra state and methods. 
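For context on the new ReportResolvedReference option above: a minimal caller-side sketch, assuming the usual copy.Image setup (ctx, policyContext, src and dst are assumptions, not shown in this diff):

```go
import (
	"context"
	"fmt"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
)

// copyAndResolve is a hypothetical caller, not part of this diff.
func copyAndResolve(ctx context.Context, pc *signature.PolicyContext, src, dst types.ImageReference) error {
	var resolved types.ImageReference
	if _, err := copy.Image(ctx, pc, dst, src, &copy.Options{
		ReportResolvedReference: &resolved,
	}); err != nil {
		return err
	}
	// Most transports leave resolved as nil; for containers-storage: it
	// carries an image ID, so a later storage.ResolveReference is unambiguous.
	if resolved != nil {
		fmt.Println("created:", transports.ImageName(resolved))
	}
	return nil
}
```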
diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 0ec54ded2..7ddfe917b 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -106,7 +106,7 @@ func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity if len(c.signers) == 1 { return nil, fmt.Errorf("creating signature: %w", err) } else { - return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err) + return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err) } } res = append(res, newSig) diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index ba414a22d..e008c7e86 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -323,10 +323,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst if err != nil { return fmt.Errorf("parsing image configuration: %w", err) } - wantedPlatforms, err := platform.WantedPlatforms(sys) - if err != nil { - return fmt.Errorf("getting current platform information %#v: %w", sys, err) - } + wantedPlatforms := platform.WantedPlatforms(sys) options := newOrderedSet() match := false @@ -822,11 +819,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil } - logrus.Debugf("Failed to retrieve partial blob: %v", err) - return false, types.BlobInfo{}, nil + // On a "partial content not available" error, ignore it and retrieve the whole layer. + var perr private.ErrFallbackToOrdinaryLayerDownload + if errors.As(err, &perr) { + logrus.Debugf("Failed to retrieve partial blob: %v", err) + return false, types.BlobInfo{}, nil + } + return false, types.BlobInfo{}, err }() if err != nil { - return types.BlobInfo{}, "", err + return types.BlobInfo{}, "", fmt.Errorf("partial pull of blob %s: %w", srcInfo.Digest, err) } if reused { return blobInfo, cachedDiffID, nil diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index c9b390318..000c8987d 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -251,14 +251,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa return nil } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) -func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error { +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *dirImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { return nil } diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go index 632ee7c49..9e0d32007 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/dest.go +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -34,16 +34,17 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat writer = w closeWriter = true } - tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref) - if sys != nil && sys.DockerArchiveAdditionalTags != nil { - tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) - } - return &archiveImageDestination{ - Destination: tarDest, + d := &archiveImageDestination{ ref: ref, writer: writer, closeWriter: closeWriter, - }, nil + } + tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref, d.CommitWithOptions) + if sys != nil && sys.DockerArchiveAdditionalTags != nil { + tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) + } + d.Destination = tarDest + return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -60,14 +61,11 @@ func (d *archiveImageDestination) Close() error { return nil } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. 
rollback is allowed but not guaranteed) +func (d *archiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { d.writer.imageCommitted() if d.closeWriter { // We could do this only in .Close(), but failures in .Close() are much more likely to be diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go index 7d66ef6bc..e69e21ef7 100644 --- a/vendor/github.com/containers/image/v5/docker/body_reader.go +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -6,7 +6,7 @@ import ( "fmt" "io" "math" - "math/rand" + "math/rand/v2" "net/http" "net/url" "strconv" @@ -158,7 +158,7 @@ func (br *bodyReader) Read(p []byte) (int, error) { logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise } br.body = nil - time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede + time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede headers := map[string][]string{ "Range": {fmt.Sprintf("bytes=%d-", br.offset)}, @@ -197,7 +197,7 @@ func (br *bodyReader) Read(p []byte) (int, error) { consumedBody = true br.body = res.Body br.lastRetryOffset = br.offset - br.lastRetryTime = time.Time{} + br.lastRetryTime = time.Now() return n, nil default: diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 354af2140..64ccf6ae5 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -80,6 +80,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) { return &http.Client{ Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsc, }, CheckRedirect: dockerclient.CheckRedirect, @@ -89,6 +90,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) { func httpConfig() *http.Client { return &http.Client{ Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, TLSClientConfig: nil, }, CheckRedirect: dockerclient.CheckRedirect, diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index 9b880a2e7..4a59a6a61 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -56,16 +56,17 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem goroutineContext, goroutineCancel := context.WithCancel(ctx) go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) - return &daemonImageDestination{ + d := &daemonImageDestination{ ref: ref, mustMatchRuntimeOS: mustMatchRuntimeOS, - Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef), archive: archive, goroutineCancel: goroutineCancel, statusChannel: statusChannel, writer: writer, committed: false, - }, nil + } + d.Destination = tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef, d.CommitWithOptions) + return d, nil } // imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel @@ -146,7 +147,7 @@ func (d *daemonImageDestination) Close() error { // immediately, and hopefully, through 
terminating the sending which uses "Transfer-Encoding: chunked"" without sending // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all. // Whether that works or not, closing the PipeWriter seems desirable in any case. - if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil { + if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .CommitWithOptions()")); err != nil { return err } } @@ -159,14 +160,11 @@ func (d *daemonImageDestination) Reference() types.ImageReference { return d.ref } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *daemonImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { logrus.Debugf("docker-daemon: Closing tar stream") if err := d.archive.Close(); err != nil { return err diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 97d97fed5..bd024422b 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -42,7 +42,6 @@ const ( dockerRegistry = "registry-1.docker.io" resolvedPingV2URL = "%s://%s/v2/" - resolvedPingV1URL = "%s://%s/v1/_ping" tagsPath = "/v2/%s/tags/list" manifestPath = "/v2/%s/manifests/%s" blobsPath = "/v2/%s/blobs/%s" @@ -936,34 +935,6 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } if err != nil { err = fmt.Errorf("pinging container registry %s: %w", c.registry, err) - if c.sys != nil && c.sys.DockerDisableV1Ping { - return err - } - // best effort to understand if we're talking to a V1 registry - pingV1 := func(scheme string) bool { - pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) - if err != nil { - return false - } - resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil) - if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err) - return false - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return false - } - return true - } - isV1 := pingV1("https") - if !isV1 && 
c.tlsClientConfig.InsecureSkipVerify { - isV1 = pingV1("http") - } - if isV1 { - err = ErrV1NotSupported - } } return err } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 9741afc3f..74f559dce 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -91,6 +91,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. } for _, tag := range tagsHolder.Tags { if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values + // Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary + // to the spec, may include JSON null values in the list; and Go silently parses them as "". + if tag == "" { + logrus.Debugf("Ignoring invalid empty tag") + continue + } // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory, // contrary to the tag format specified in // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 , diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index ed3d4a2c0..f5e2bb61e 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -923,13 +923,10 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context return nil } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error { +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *dockerImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { return nil } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index c8f6ba305..6e44ce096 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -116,10 +116,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef // Don’t just build a string, try to preserve the typed error. 
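The hunk below implements this by flattening mirror errors into text while wrapping only the primary error with %w; a minimal sketch of the property that preserves (ErrTooManyRequests is this package's existing sentinel, the local names mirror the code below):

```go
// Sketch: sentinel checks keep working through the aggregate error because
// only primary.err is wrapped with %w; mirror errors become plain text.
err := fmt.Errorf("(Mirrors also failed: %s): %s: %w",
	strings.Join(extras, "\n"), primary.ref.String(), primary.err)
if errors.Is(err, ErrTooManyRequests) {
	// matches exactly when primary.err is (or wraps) ErrTooManyRequests
}
```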
primary := &attempts[len(attempts)-1] extras := []string{} - for i := 0; i < len(attempts)-1; i++ { + for _, attempt := range attempts[:len(attempts)-1] { // This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use. // The paired [] at least have some chance of being unambiguous. - extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err)) + extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err)) } return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err) } @@ -464,26 +464,20 @@ func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanc var res []signature.Signature switch { case s.c.supportsSignatures: - sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigs...) case s.c.signatureBase != nil: - sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigs...) default: return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") } - sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigstoreSigs...) return res, nil } @@ -505,35 +499,35 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest * return manifest.Digest(s.cachedManifest) } -// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, -// which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, +// which is not nil, storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } // NOTE: Keep this in sync with docs/signature-protocols.md! 
- signatures := []signature.Signature{} for i := 0; ; i++ { if i >= maxLookasideSignatures { - return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) + return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) } sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) if err != nil { - return nil, err + return err } signature, missing, err := s.getOneSignature(ctx, sigURL) if err != nil { - return nil, err + return err } if missing { break } - signatures = append(signatures, signature) + *dest = append(*dest, signature) } - return signatures, nil + return nil } // getOneSignature downloads one signature from sigURL, and returns (signature, false, nil) @@ -596,48 +590,51 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL } } -// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension. -func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, +// storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest) if err != nil { - return nil, err + return err } - var sigs []signature.Signature for _, sig := range parsedBody.Signatures { if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content)) + *dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content)) } } - return sigs, nil + return nil } -func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention, +// storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { if !s.c.useSigstoreAttachments { logrus.Debugf("Not looking for sigstore attachments: disabled by configuration") - return nil, nil + return nil } manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest) if err != nil { - return nil, err + return err } if ociManifest == nil { - return nil, nil + return nil } logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers)) - res := []signature.Signature{} for layerIndex, layer := range ociManifest.Layers { // Note that this copies all kinds of attachments: attestations, and whatever else is there, // not just signatures. We leave the signature consumers to decide based on the MIME type. 
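Taken together, the three helpers above now share one shape; a condensed view of the call site (see GetSignaturesWithFormat earlier in this file), with the error contract made explicit:

```go
// Condensed sketch: every signature source appends into one caller-owned
// slice; on error, *dest may be partially filled, and the caller discards
// it by returning nil instead.
var res []signature.Signature
if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil {
	return nil, err
}
if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil {
	return nil, err
}
return res, nil
```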
@@ -648,11 +645,11 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize, none.NoCache) if err != nil { - return nil, err + return err } - res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) + *dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) } - return res, nil + return nil } // deleteImage deletes the named image from the registry, if supported. @@ -830,7 +827,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) handleBufferedNetworkReader(&br) }() - for i := uint(0); i < nBuffers; i++ { + for range nBuffers { b := bufferedNetworkReaderBuffer{ data: make([]byte, bufferSize), } diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index 4392f9d18..e749b5014 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -12,6 +12,7 @@ import ( var ( // ErrV1NotSupported is returned when we're trying to talk to a // docker V1 registry. + // Deprecated: The V1 container registry detection is no longer performed, so this error is never returned. ErrV1NotSupported = errors.New("can't talk to a V1 container registry") // ErrTooManyRequests is returned when the status code returned is 429 ErrTooManyRequests = errors.New("too many requests to registry") diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index d5446840b..f14234680 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -20,22 +20,25 @@ import ( "github.com/sirupsen/logrus" ) -// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer. +// Destination is a partial implementation of private.ImageDestination for writing to a Writer. type Destination struct { impl.Compat impl.PropertyMethodsInitialize stubs.NoPutBlobPartialInitialize stubs.NoSignaturesInitialize - archive *Writer - repoTags []reference.NamedTagged + archive *Writer + commitWithOptions func(ctx context.Context, options private.CommitOptions) error + repoTags []reference.NamedTagged // Other state. config []byte sysCtx *types.SystemContext } // NewDestination returns a tarfile.Destination adding images to the specified Writer. -func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination { +// commitWithOptions implements ImageDestination.CommitWithOptions. 
+func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged, + commitWithOptions func(ctx context.Context, options private.CommitOptions) error) *Destination { repoTags := []reference.NamedTagged{} if ref != nil { repoTags = append(repoTags, ref) @@ -57,9 +60,10 @@ func NewDestination(sys *types.SystemContext, archive *Writer, transportName str NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName), NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"), - archive: archive, - repoTags: repoTags, - sysCtx: sys, + archive: archive, + commitWithOptions: commitWithOptions, + repoTags: repoTags, + sysCtx: sys, } dest.Compat = impl.AddCompat(dest) return dest @@ -179,3 +183,13 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags) } + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *Destination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + // This indirection exists because impl.Compat expects all ImageDestinationInternalOnly methods + // to be implemented in one place. + return d.commitWithOptions(ctx, options) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go index 47c169a1f..70b207d9b 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go @@ -99,3 +99,16 @@ func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanc } return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest) } + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) +func (c *Compat) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return c.dest.CommitWithOptions(ctx, private.CommitOptions{ + UnparsedToplevel: unparsedToplevel, + }) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go index bbb53c198..22bed4b0f 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -36,8 +36,9 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. +// Even if SupportsPutBlobPartial() returns true, the call can fail. +// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. +// The fallback _must not_ be done otherwise. func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index f5a38541a..0fe902e31 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -97,3 +97,11 @@ func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []sign } return w.PutSignatures(ctx, simpleSigs, instanceDigest) } + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (w *wrapped) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + return w.Commit(ctx, options.UnparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index f847fa9cc..07922cece 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -152,10 +152,7 @@ func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemCont // ChooseInstance parses blob as a schema2 manifest list, and returns the digest // of the image which is appropriate for the current environment. 
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } + wantedPlatforms := platform.WantedPlatforms(ctx) for _, wantedPlatform := range wantedPlatforms { for _, d := range list.Manifests { imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index fe78efaeb..6a0f88d3a 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -236,10 +236,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi if preferGzip == types.OptionalBoolTrue { didPreferGzip = true } - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } + wantedPlatforms := platform.WantedPlatforms(ctx) var bestMatch *instanceCandidate bestMatch = nil for manifestIndex, d := range index.Manifests { diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index afdce1d3d..3a16dad63 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -153,7 +153,7 @@ var compatibility = map[string][]string{ // WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user, // the most compatible platform is first. // If some option (arch, os, variant) is not present, a value from current platform is detected. -func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { +func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform { // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all. // The fields are not specified by the OCI specification, as of version 1.1, usefully enough // to be interoperable, anyway. @@ -211,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { Variant: v, }) } - return res, nil + return res } // MatchesPlatform returns true if a platform descriptor from a multi-arch image matches diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index d81ea6703..4247a8db7 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -53,8 +53,9 @@ type ImageDestinationInternalOnly interface { // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). - // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller - // should fall back to PutBlobWithOptions. + // Even if SupportsPutBlobPartial() returns true, the call can fail. + // If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. + // The fallback _must not_ be done otherwise. 
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error) // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -69,6 +70,12 @@ type ImageDestinationInternalOnly interface { // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. // MUST be called after PutManifest (signatures may reference manifest contents). PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error + + // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. + // WARNING: This does not have any transactional semantics: + // - Uploaded data MAY be visible to others before CommitWithOptions() is called + // - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) + CommitWithOptions(ctx context.Context, options CommitOptions) error } // ImageDestination is an internal extension to the types.ImageDestination @@ -145,6 +152,19 @@ type ReusedBlob struct { MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } +// CommitOptions are used in CommitWithOptions +type CommitOptions struct { + // UnparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list + // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the + // original manifest list digest, if desired. + UnparsedToplevel types.UnparsedImage + // ReportResolvedReference, if set, asks the transport to store a “resolved” (more detailed) reference to the created image + // into the value this option points to. + // What “resolved” means is transport-specific. + // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value. + ReportResolvedReference *types.ImageReference +} + // ImageSourceChunk is a portion of a blob. // This API is experimental and can be changed without bumping the major version number. type ImageSourceChunk struct { @@ -183,3 +203,22 @@ type UnparsedImage interface { // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) } + +// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial. +// It suggests to the caller that a fallback mechanism can be used instead of a hard failure; +// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob. 
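The caller side of this contract is the copy/single.go hunk earlier in this diff; condensed:

```go
// Condensed from copy/single.go above: fall back to a full layer download
// only when the error unwraps to ErrFallbackToOrdinaryLayerDownload.
uploaded, err := ic.c.dest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
if err == nil {
	return true, updatedBlobInfoFromUpload(srcInfo, uploaded), nil
}
var fallback private.ErrFallbackToOrdinaryLayerDownload
if errors.As(err, &fallback) {
	logrus.Debugf("Failed to retrieve partial blob: %v", err)
	return false, types.BlobInfo{}, nil // caller proceeds with an ordinary download
}
return false, types.BlobInfo{}, err // any other error is fatal
```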
+type ErrFallbackToOrdinaryLayerDownload struct { + err error +} + +func (c ErrFallbackToOrdinaryLayerDownload) Error() string { + return c.err.Error() +} + +func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error { + return c.err +} + +func NewErrFallbackToOrdinaryLayerDownload(err error) error { + return ErrFallbackToOrdinaryLayerDownload{err: err} +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 222aa896e..b74a1e240 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -318,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { // Add the history and rootfs information. rootfs, err := json.Marshal(rootFS) if err != nil { - return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err) } rawRootfs := json.RawMessage(rootfs) raw["rootfs"] = &rawRootfs history, err := json.Marshal(convertedHistory) if err != nil { - return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err) + return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err) } rawHistory := json.RawMessage(history) raw["history"] = &rawHistory // Encode the result. config, err = json.Marshal(raw) if err != nil { - return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err) + return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err) } return config, nil } diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index f714574ee..0faa866b7 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -60,7 +60,7 @@ func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) { if err := json.Unmarshal(manifestBlob, &oci1); err != nil { return nil, err } - if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex, + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest, manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index a3eb5d7a1..1d1e26c84 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -117,8 +117,9 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. +// Even if SupportsPutBlobPartial() returns true, the call can fail. +// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. +// The fallback _must not_ be done otherwise. 
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } @@ -149,13 +150,12 @@ func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest) } -// Commit marks the process of storing the image as successful and asks for the image to be persisted -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. -// after the directory is made, it is tarred up into a file and the directory is deleted -func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil { +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + if err := d.unpackedDest.CommitWithOptions(ctx, options); err != nil { return fmt.Errorf("storing image %q: %w", d.ref.image, err) } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go index bcf257df6..08366a7e2 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -27,17 +27,8 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex return err } - var blobsUsedByImage map[digest.Digest]int - - switch descriptor.MediaType { - case imgspecv1.MediaTypeImageManifest: - blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir) - case imgspecv1.MediaTypeImageIndex: - blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir) - default: - return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) - } - if err != nil { + blobsUsedByImage := make(map[digest.Digest]int) + if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil { return err } @@ -54,82 +45,48 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex return ref.deleteReferenceFromIndex(descriptorIndex) } -func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { - manifest, err := ref.getManifest(descriptor, sharedBlobsDir) - if err != nil { - return nil, err - } - blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest) - blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference - - return blobsUsedInManifest, nil -} - -func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir 
string) (map[digest.Digest]int, error) { +// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself. +func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error { blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) if err != nil { - return nil, err - } - index, err := parseIndex(blobPath) - if err != nil { - return nil, err + return err } - blobsUsedInImageRefIndex := make(map[digest.Digest]int) - err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir) - if err != nil { - return nil, err + dest[descriptor.Digest]++ + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + manifest, err := parseJSON[imgspecv1.Manifest](blobPath) + if err != nil { + return err + } + dest[manifest.Config.Digest]++ + for _, layer := range manifest.Layers { + dest[layer.Digest]++ + } + case imgspecv1.MediaTypeImageIndex: + index, err := parseIndex(blobPath) + if err != nil { + return err + } + if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil { + return err + } + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) } - blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference - - return blobsUsedInImageRefIndex, nil + return nil } -// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map -func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { +// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself. 
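These two helpers recurse mutually: an index counts each child via countBlobsForDescriptor, which re-enters countBlobsReferencedByIndex for nested indexes. A condensed usage sketch, following DeleteImage above:

```go
// Condensed from DeleteImage above: one map accumulates reference counts
// for every blob reachable from the image being deleted, nested indexes
// included, plus the top-level descriptor itself.
blobsUsedByImage := make(map[digest.Digest]int)
if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil {
	return err
}
// getBlobsToDelete (below) then deletes a blob only if all of its uses in
// the root index come from this image.
```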
+func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { for _, descriptor := range index.Manifests { - destination[descriptor.Digest]++ - switch descriptor.MediaType { - case imgspecv1.MediaTypeImageManifest: - manifest, err := ref.getManifest(&descriptor, sharedBlobsDir) - if err != nil { - return err - } - for digest, count := range ref.getBlobsUsedInManifest(manifest) { - destination[digest] += count - } - case imgspecv1.MediaTypeImageIndex: - blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) - if err != nil { - return err - } - index, err := parseIndex(blobPath) - if err != nil { - return err - } - err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir) - if err != nil { - return err - } - default: - return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil { + return err } } - return nil } -func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int { - blobsUsedInManifest := make(map[digest.Digest]int, 0) - - blobsUsedInManifest[manifest.Config.Digest]++ - for _, layer := range manifest.Layers { - blobsUsedInManifest[layer.Digest]++ - } - - return blobsUsedInManifest -} - // This takes in a map of the digest and their usage count in the manifest to be deleted // It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) { @@ -138,7 +95,7 @@ func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[diges return nil, err } blobsUsedInRootIndex := make(map[digest.Digest]int) - err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) + err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) if err != nil { return nil, err } @@ -224,17 +181,3 @@ func saveJSON(path string, content any) error { return json.NewEncoder(file).Encode(content) } - -func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) { - manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) - if err != nil { - return nil, err - } - - manifest, err := parseJSON[imgspecv1.Manifest](manifestPath) - if err != nil { - return nil, err - } - - return manifest, nil -} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index a096afe0f..85374cecf 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -278,14 +278,11 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc) } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. 
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{ Version: imgspecv1.ImageLayoutVersion, }) diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index fff586bee..cef3dcccf 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -365,7 +365,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err if len(clusterInfo.CertificateAuthority) != 0 { err := validateFileIsReadable(clusterInfo.CertificateAuthority) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err)) } } @@ -403,13 +403,13 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { if len(authInfo.ClientCertificate) != 0 { err := validateFileIsReadable(authInfo.ClientCertificate) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err)) } } if len(authInfo.ClientKey) != 0 { err := validateFileIsReadable(authInfo.ClientKey) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err)) } } } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 4170d6e20..61f69e93f 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -125,8 +125,9 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. 
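// The %v to %w conversions in openshift-copies.go above are behavioural, not cosmetic:
// %w keeps the wrapped error inspectable with errors.Is/errors.As, while %v flattens it
// to text. A self-contained illustration (imports: errors, fmt, io/fs, os):
func wrapDemo() {
	_, err := os.Open("/nonexistent")
	flat := fmt.Errorf("unable to read certificate-authority: %v", err)
	wrapped := fmt.Errorf("unable to read certificate-authority: %w", err)
	fmt.Println(errors.Is(flat, fs.ErrNotExist))    // false: the chain is broken
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: the chain is preserved
}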
+// Even if SupportsPutBlobPartial() returns true, the call can fail. +// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. +// The fallback _must not_ be done otherwise. func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } @@ -235,13 +236,10 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, return nil } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return d.docker.Commit(ctx, unparsedToplevel) +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + return d.docker.CommitWithOptions(ctx, options) } diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 951b5d098..d4ebe413b 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -435,7 +435,11 @@ func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, si return nil } -func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error { +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *ostreeImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { runtime.LockOSThread() defer runtime.UnlockOSThread() diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go index d8899f2ed..f7103d13e 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -238,8 +238,9 @@ func (d *blobCacheDestination) SupportsPutBlobPartial() bool { // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. 
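// The tightened PutBlobPartial contract above implies a specific call-site shape: only
// ErrFallbackToOrdinaryLayerDownload permits retrying as an ordinary blob upload. A
// sketch of a caller under that contract (the parameter plumbing is an assumption, and
// private is c/image's internal package, so only in-tree callers look like this):
func putBlob(ctx context.Context, dest private.ImageDestination, stream io.Reader,
	srcInfo types.BlobInfo, acc private.BlobChunkAccessor,
	partialOpts private.PutBlobPartialOptions, opts private.PutBlobOptions) (private.UploadedBlob, error) {
	if dest.SupportsPutBlobPartial() {
		res, err := dest.PutBlobPartial(ctx, acc, srcInfo, partialOpts)
		if err == nil {
			return res, nil
		}
		var fallback private.ErrFallbackToOrdinaryLayerDownload
		if !errors.As(err, &fallback) {
			return private.UploadedBlob{}, err // the fallback _must not_ be done otherwise
		}
	}
	return dest.PutBlobWithOptions(ctx, stream, srcInfo, opts)
}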
chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. +// Even if SupportsPutBlobPartial() returns true, the call can fail. +// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. +// The fallback _must not_ be done otherwise. func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } @@ -306,6 +307,10 @@ func (d *blobCacheDestination) PutSignaturesWithFormat(ctx context.Context, sign return d.destination.PutSignaturesWithFormat(ctx, signatures, instanceDigest) } -func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return d.destination.Commit(ctx, unparsedToplevel) +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *blobCacheDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + return d.destination.CommitWithOptions(ctx, options) } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 40ed09bed..d73aafbdb 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -200,8 +200,6 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the // number of entries to limit for known and unknown location separately, only to make testing simpler. -// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original -// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix. func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { // split unknown candidates and known candidates // and limit them separately. diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index b83a257e4..782c86d06 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -99,8 +99,18 @@ func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, return internal.AlgorithmCompressor(algo)(dest, m, level) } -// CompressStreamWithMetadata returns the compressor by its name. 
If the compression -// generates any metadata, it is written to the provided metadata map. +// CompressStreamWithMetadata returns the compressor by its name. +// +// Compressing a stream may create integrity data that allows consuming the compressed byte stream +// while only using subsets of the compressed data (if the compressed data is seekable and most +// of the uncompressed data is already present via other means), while still protecting integrity +// of the compressed stream against unwanted modification. (In OCI container images, this metadata +// is usually carried in manifest annotations.) +// +// Such a partial decompression is not implemented by this package; it is consumed e.g. by +// github.com/containers/storage/pkg/chunked . +// +// If the compression generates such metadata, it is written to the provided metadata map. func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) { return internal.AlgorithmCompressor(algo)(dest, metadata, level) } diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go index d6f85274d..e715705b4 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -3,6 +3,15 @@ package internal import "io" // CompressorFunc writes the compressed stream to the given writer using the specified compression level. +// +// Compressing a stream may create integrity data that allows consuming the compressed byte stream +// while only using subsets of the compressed data (if the compressed data is seekable and most +// of the uncompressed data is already present via other means), while still protecting integrity +// of the compressed stream against unwanted modification. (In OCI container images, this metadata +// is usually carried in manifest annotations.) +// +// If the compression generates such metadata, it is written to the provided metadata map. +// // The caller must call Close() on the stream (even if the input stream does not need closing!). type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error) diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index 604c21c08..bddaca690 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -40,17 +40,20 @@ type UntrustedRekorPayload struct { // A compile-time check that UntrustedRekorSET implements json.Unmarshaler var _ json.Unmarshaler = (*UntrustedRekorSET)(nil) -// UnmarshalJSON implements the json.Unmarshaler interface -func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(JSONFormatError); ok { - err = NewInvalidSignatureError(formatErr.Error()) - } +// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError. +// All other errors are returned as is. 
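// CompressStreamWithMetadata usage, following the documentation added above: the caller
// owns the metadata map and, for formats that produce integrity metadata (e.g.
// zstd:chunked), forwards it as manifest annotations. A minimal sketch, assuming an
// in-memory destination; plain algorithms may simply leave the map empty:
func compressWithMetadata(src io.Reader, algo compression.Algorithm) ([]byte, map[string]string, error) {
	var buf bytes.Buffer
	annotations := map[string]string{}
	w, err := compression.CompressStreamWithMetadata(&buf, annotations, algo, nil)
	if err != nil {
		return nil, nil, err
	}
	if _, err := io.Copy(w, src); err != nil {
		w.Close()
		return nil, nil, err
	}
	// Close must always be called, even if the input needs no closing.
	if err := w.Close(); err != nil {
		return nil, nil, err
	}
	return buf.Bytes(), annotations, nil
}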
+func JSONFormatToInvalidSignatureError(err error) error { + if formatErr, ok := err.(JSONFormatError); ok { + err = NewInvalidSignatureError(formatErr.Error()) } return err } +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) +} + // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error { @@ -77,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil) // UnmarshalJSON implements the json.Unmarshaler interface func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error { - err := p.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(JSONFormatError); ok { - err = NewInvalidSignatureError(formatErr.Error()) - } - } - return err + return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data)) } // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go index 8630b08e3..90a81dc1c 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -80,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil) // UnmarshalJSON implements the json.Unmarshaler interface func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(JSONFormatError); ok { - err = NewInvalidSignatureError(formatErr.Error()) - } - } - return err + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) } // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. @@ -127,7 +121,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { if gotTimestamp { intTimestamp := int64(timestamp) if float64(intTimestamp) != timestamp { - return NewInvalidSignatureError("Field optional.timestamp is not is not an integer") + return NewInvalidSignatureError("Field optional.timestamp is not an integer") } s.untrustedTimestamp = &intTimestamp } diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 8e7665c4b..8de705c22 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -51,28 +51,39 @@ func (err InvalidPolicyFormatError) Error() string { // NOTE: When this function returns an error, report it to the user and abort. // DO NOT hard-code fallback policies in your application. func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { - return NewPolicyFromFile(defaultPolicyPath(sys)) + policyPath, err := defaultPolicyPath(sys) + if err != nil { + return nil, err + } + return NewPolicyFromFile(policyPath) } -// defaultPolicyPath returns a path to the default policy of the system. 
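// The corrected "optional.timestamp" message above belongs to a real validity check:
// JSON numbers arrive as float64, so an "integer" timestamp is one that survives the
// float64 -> int64 -> float64 round trip. The check in isolation:
func isIntegerTimestamp(timestamp float64) bool {
	intTimestamp := int64(timestamp)
	return float64(intTimestamp) == timestamp
}

// isIntegerTimestamp(1700000000)   -> true
// isIntegerTimestamp(1700000000.5) -> false, rejected as an invalid signature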
-func defaultPolicyPath(sys *types.SystemContext) string { - return defaultPolicyPathWithHomeDir(sys, homedir.Get()) +// defaultPolicyPath returns a path to the relevant policy of the system, or an error if the policy is missing. +func defaultPolicyPath(sys *types.SystemContext) (string, error) { + policyFilePath, err := defaultPolicyPathWithHomeDir(sys, homedir.Get(), systemDefaultPolicyPath) + if err != nil { + return "", err + } + return policyFilePath, nil } // defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath, -// it exists only to allow testing it with an artificial home directory. -func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string { +// it exists only to allow testing it with artificial paths. +func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string, systemPolicyPath string) (string, error) { if sys != nil && sys.SignaturePolicyPath != "" { - return sys.SignaturePolicyPath + return sys.SignaturePolicyPath, nil } userPolicyFilePath := filepath.Join(homeDir, userPolicyFile) if err := fileutils.Exists(userPolicyFilePath); err == nil { - return userPolicyFilePath + return userPolicyFilePath, nil } if sys != nil && sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemPolicyPath), nil + } + if err := fileutils.Exists(systemPolicyPath); err == nil { + return systemPolicyPath, nil } - return systemDefaultPolicyPath + return "", fmt.Errorf("no policy.json file found at any of the following: %q, %q", userPolicyFilePath, systemPolicyPath) } // NewPolicyFromFile returns a policy configured in the specified file. diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go index 48dbfbbde..390957b02 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1) newParsedRef, err := reference.ParseNamed(newNamedRef) if err != nil { - return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err) + return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err) } return newParsedRef, nil } diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go index 30df997d8..94a846593 100644 --- a/vendor/github.com/containers/image/v5/signature/simple.go +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -105,13 +105,7 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil) // UnmarshalJSON implements the json.Unmarshaler interface func (s *untrustedSignature) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(internal.JSONFormatError); ok { - err = internal.NewInvalidSignatureError(formatErr.Error()) - } - } - return err + return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) } // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type. 
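// The resolution order that defaultPolicyPathWithHomeDir now encodes, condensed into
// one hypothetical ladder (the user path is assumed to be
// .config/containers/policy.json, matching the userPolicyFile constant used above):
func resolvePolicyPath(explicitPath, homeDir, implicitRoot, systemPath string) (string, error) {
	if explicitPath != "" {
		return explicitPath, nil // sys.SignaturePolicyPath always wins
	}
	userPath := filepath.Join(homeDir, ".config/containers/policy.json")
	if _, err := os.Stat(userPath); err == nil {
		return userPath, nil
	}
	if implicitRoot != "" {
		return filepath.Join(implicitRoot, systemPath), nil // no existence check here
	}
	if _, err := os.Stat(systemPath); err == nil {
		return systemPath, nil
	}
	// New in this version: a missing policy is an explicit error rather than
	// a returned path to a nonexistent file.
	return "", fmt.Errorf("no policy.json file found at any of the following: %q, %q", userPath, systemPath)
}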
@@ -149,7 +143,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { if gotTimestamp { intTimestamp := int64(timestamp) if float64(intTimestamp) != timestamp { - return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer") + return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer") } s.untrustedTimestamp = &intTimestamp } diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 842a3ab06..51fac71e4 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -311,23 +311,31 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { +// Even if SupportsPutBlobPartial() returns true, the call can fail. +// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. +// The fallback _must not_ be done otherwise. +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) { fetcher := zstdFetcher{ chunkAccessor: chunkAccessor, ctx: ctx, blobInfo: srcInfo, } + defer func() { + var perr chunked.ErrFallbackToOrdinaryLayerDownload + if errors.As(retErr, &perr) { + retErr = private.NewErrFallbackToOrdinaryLayerDownload(retErr) + } + }() + differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher) if err != nil { return private.UploadedBlob{}, err } - out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ) + out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ) if err != nil { - return private.UploadedBlob{}, err + return private.UploadedBlob{}, fmt.Errorf("staging a partially-pulled layer: %w", err) } succeeded := false defer func() { @@ -337,7 +345,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces }() if out.TOCDigest == "" && out.UncompressedDigest == "" { - return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set") + return private.UploadedBlob{}, errors.New("internal error: PrepareStagedLayer succeeded with neither TOCDigest nor UncompressedDigest set") } blobDigest := srcInfo.Digest @@ -356,11 +364,11 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is // responsible for ensuring blobDigest has been validated. 
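// The defer added to PutBlobPartial above relies on a named return value so the
// translation runs on every exit path, not just one return statement. The idiom in
// isolation, with stdlib types standing in for the chunked/private error pair:
func readConfig(path string) (_ []byte, retErr error) {
	defer func() {
		var perr *fs.PathError
		if errors.As(retErr, &perr) {
			// Rewrite the error after the fact, whichever return produced it.
			retErr = fmt.Errorf("config unavailable: %w", retErr)
		}
	}()
	return os.ReadFile(path)
}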
if out.CompressedDigest != blobDigest { - return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q", + return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", out.CompressedDigest, blobDigest) } // So, record also information about blobDigest, that might benefit reuse. - // We trust ApplyDiffWithDiffer to validate or create both values correctly. + // We trust PrepareStagedLayer to validate or create both values correctly. s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) } else { @@ -922,7 +930,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D flags := make(map[string]interface{}) if untrustedUncompressedDigest != "" { - flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest + flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest.String() logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID) } @@ -1116,26 +1124,23 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D return s.untrustedDiffIDValues[layerIndex], nil } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. if len(s.manifest) == 0 { - return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") + return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()") } - toplevelManifest, _, err := unparsedToplevel.Manifest(ctx) + toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx) if err != nil { return fmt.Errorf("retrieving top-level manifest: %w", err) } // If the name we're saving to includes a digest, then check that the // manifests that we're about to save all either match the one from the - // unparsedToplevel, or match the digest in the name that we're using. + // options.UnparsedToplevel, or match the digest in the name that we're using. 
if s.imageRef.named != nil { if digested, ok := s.imageRef.named.(reference.Digested); ok { matches, err := manifest.MatchesDigest(s.manifest, digested.Digest()) @@ -1168,24 +1173,24 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t }, blob.Size); err != nil { return err } else if stopQueue { - return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest) + return fmt.Errorf("Internal error: storageImageDestination.CommitWithOptions(): commitLayer() not ready to commit for layer %q", blob.Digest) } } var lastLayer string if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but it is technically possible, and may happen for non-image artifacts. prev, ok := s.indexToStorageID[len(layerBlobs)-1] if !ok { - return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) + return fmt.Errorf("Internal error: storageImageDestination.CommitWithOptions(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) } lastLayer = prev } // If one of those blobs was a configuration blob, then we can try to dig out the date when the image // was originally created, in case we're just copying it. If not, no harm done. - options := &storage.ImageOptions{} + imgOptions := &storage.ImageOptions{} if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { logrus.Debugf("setting image creation date to %s", inspect.Created) - options.CreationDate = *inspect.Created + imgOptions.CreationDate = *inspect.Created } // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so @@ -1202,13 +1207,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t if err != nil { return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) } - options.BigData = append(options.BigData, storage.ImageBigDataOption{ + imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ Key: blob.String(), Data: v, Digest: digest.Canonical.FromBytes(v), }) } - // Set up to save the unparsedToplevel's manifest if it differs from + // Set up to save the options.UnparsedToplevel's manifest if it differs from // the per-platform one, which is saved below. if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { manifestDigest, err := manifest.Digest(toplevelManifest) @@ -1219,7 +1224,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t if err != nil { return err } - options.BigData = append(options.BigData, storage.ImageBigDataOption{ + imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ Key: key, Data: toplevelManifest, Digest: manifestDigest, @@ -1232,19 +1237,19 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t if err != nil { return err } - options.BigData = append(options.BigData, storage.ImageBigDataOption{ + imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ Key: key, Data: s.manifest, Digest: s.manifestDigest, }) - options.BigData = append(options.BigData, storage.ImageBigDataOption{ + imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ Key: storage.ImageDigestBigDataKey, Data: s.manifest, Digest: s.manifestDigest, }) // Set up to save the signatures, if we have any. 
if len(s.signatures) > 0 { - options.BigData = append(options.BigData, storage.ImageBigDataOption{ + imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ Key: "signatures", Data: s.signatures, Digest: digest.Canonical.FromBytes(s.signatures), @@ -1255,7 +1260,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t if err != nil { return err } - options.BigData = append(options.BigData, storage.ImageBigDataOption{ + imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ Key: key, Data: signatures, Digest: digest.Canonical.FromBytes(signatures), @@ -1268,7 +1273,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t return fmt.Errorf("encoding metadata for image: %w", err) } if len(metadata) != 0 { - options.Metadata = string(metadata) + imgOptions.Metadata = string(metadata) } // Create the image record, pointing to the most-recently added layer. @@ -1280,7 +1285,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } } oldNames := []string{} - img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", imgOptions) if err != nil { if !errors.Is(err, storage.ErrDuplicateID) { logrus.Debugf("error creating image: %q", err) @@ -1301,21 +1306,21 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // sizes (tracked in the metadata) which might have already // been present with new values, when ideally we'd find a way // to merge them since they all apply to the same image - for _, data := range options.BigData { + for _, data := range imgOptions.BigData { if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil { logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err) return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err) } } - if options.Metadata != "" { - if err := s.imageRef.transport.store.SetMetadata(img.ID, options.Metadata); err != nil { + if imgOptions.Metadata != "" { + if err := s.imageRef.transport.store.SetMetadata(img.ID, imgOptions.Metadata); err != nil { logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) return fmt.Errorf("saving metadata for image %q: %w", img.ID, err) } - logrus.Debugf("saved image metadata %q", options.Metadata) + logrus.Debugf("saved image metadata %q", imgOptions.Metadata) } } else { - logrus.Debugf("created new image ID %q with metadata %q", img.ID, options.Metadata) + logrus.Debugf("created new image ID %q with metadata %q", img.ID, imgOptions.Metadata) } // Clean up the unfinished image on any error. @@ -1338,6 +1343,21 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } logrus.Debugf("added name %q to image %q", name, img.ID) } + if options.ReportResolvedReference != nil { + // FIXME? This is using nil for the named reference. + // It would be better to also use s.imageRef.named, because that allows us to resolve to the right + // digest / manifest (and corresponding signatures). + // The problem with that is that resolving such a reference fails if the s.imageRef.named name is moved to a different image + // (because it is a tag that moved, or because we have pulled “the same” image for a different architecture). 
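// From the caller's side, the ReportResolvedReference handling above is reached through
// copy.Options: the caller passes a pointer and reads it back after a successful copy.
// A sketch, assuming the public copy.Options field in this release mirrors
// private.CommitOptions as wired here:
func copyAndResolve(ctx context.Context, policyCtx *signature.PolicyContext,
	destRef, srcRef types.ImageReference) (types.ImageReference, error) {
	var resolved types.ImageReference
	if _, err := copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		ReportResolvedReference: &resolved,
	}); err != nil {
		return nil, err
	}
	// For the storage transport the result currently identifies the image by
	// ID only, per the FIXME above; callers should not rely on a name being set.
	return resolved, nil
}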
+ // Right now (2024-11), ReportResolvedReference is only used in c/common/libimage, where the caller only extracts the image ID, + // so the name does not matter; to give us options, copy.Options.ReportResolvedReference is explicitly refusing to document + // whether the value contains a name. + resolved, err := newReference(s.imageRef.transport, nil, intendedID) + if err != nil { + return fmt.Errorf("creating a resolved reference for (%s, %s): %w", s.imageRef.StringWithinTransport(), intendedID, err) + } + *options.ReportResolvedReference = resolved + } commitSucceeded = true return nil diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 2a1099f67..acc4cb30e 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -37,7 +37,7 @@ func newReference(transport storageTransport, named reference.Named, id string) } if id != "" { if err := validateImageID(id); err != nil { - return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference) + return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference) } } // We take a copy of the transport, which contains a pointer to the diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 4f501fc22..55788f887 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "os" + "slices" "sync" "github.com/containers/image/v5/docker/reference" @@ -300,7 +301,7 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed } - physicalBlobInfos := []types.BlobInfo{} + physicalBlobInfos := []types.BlobInfo{} // Built reversed layerID := s.image.TopLayer for layerID != "" { layer, err := s.imageRef.transport.store.Layer(layerID) @@ -340,9 +341,10 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige Size: size, MediaType: uncompressedLayerType, } - physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) + physicalBlobInfos = append(physicalBlobInfos, blobInfo) layerID = layer.Parent } + slices.Reverse(physicalBlobInfos) res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) if err != nil { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 18d4cc2d2..7d4a83bc9 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -103,7 +103,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). if _, err := io.Copy(io.Discard, reader); err != nil { - return nil, fmt.Errorf("error reading %q: %v", filename, err) + return nil, fmt.Errorf("error reading %q: %w", filename, err) } if uncompressed != nil { uncompressed.Close() @@ -152,7 +152,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System // Encode and digest the image configuration blob. 
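// The storage_src.go change above swaps repeated prepends, each of which copies the
// whole slice (O(n²) across a deep layer chain), for plain appends plus a single
// slices.Reverse (O(n)). The pattern in isolation (parent is a stand-in for the
// store's layer lookup):
func layerChainRootFirst(topLayer string, parent func(string) string) []string {
	chain := []string{} // built top to bottom, reversed once at the end
	for id := topLayer; id != ""; id = parent(id) {
		chain = append(chain, id)
	}
	slices.Reverse(chain)
	return chain
}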
configBytes, err := json.Marshal(&config) if err != nil { - return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + return nil, fmt.Errorf("error generating configuration blob for %q: %w", strings.Join(r.filenames, separator), err) } configID := digest.Canonical.FromBytes(configBytes) blobs[configID] = tarballBlob{ @@ -177,7 +177,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System // Encode the manifest. manifestBytes, err := json.Marshal(&manifest) if err != nil { - return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + return nil, fmt.Errorf("error generating manifest for %q: %w", strings.Join(r.filenames, separator), err) } // Return the image. diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go index 63d835530..b33208a51 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -38,13 +38,13 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc if filename == "-" { stdin, err = io.ReadAll(os.Stdin) if err != nil { - return nil, fmt.Errorf("error buffering stdin: %v", err) + return nil, fmt.Errorf("error buffering stdin: %w", err) } continue } f, err := os.Open(filename) if err != nil { - return nil, fmt.Errorf("error opening %q: %v", filename, err) + return nil, fmt.Errorf("error opening %q: %w", filename, err) } f.Close() } diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 7d6097346..9a7a0da2b 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -643,6 +643,7 @@ type SystemContext struct { // if true, a V1 ping attempt isn't done to give users a better error. Default is false. // Note that this field is used mainly to integrate containers/image into projectatomic/docker // in order to not break any existing docker's integration tests. + // Deprecated: The V1 container registry detection is no longer performed, so setting this flag has no effect. DockerDisableV1Ping bool // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list DockerDisableDestSchema1MIMETypes bool diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 64e468725..3743721fc 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 32 + VersionMinor = 33 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/containers/podman/v5/cmd/podman/parse/net.go b/vendor/github.com/containers/podman/v5/cmd/podman/parse/net.go index 9d6ae1947..a19c00b06 100644 --- a/vendor/github.com/containers/podman/v5/cmd/podman/parse/net.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/parse/net.go @@ -29,14 +29,23 @@ var ( ) // validateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6) or the special string HostGateway. +// ExtraHost is in the form of name1;name2;name3:ip where the ip has to be a valid ip (ipv4 or ipv6) or the special string HostGateway. // for add-host flag func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" - name, ip, hasIP := strings.Cut(val, ":") - if !hasIP || len(name) == 0 { + names, ip, hasIP := strings.Cut(val, ":") + if !hasIP || len(names) == 0 { return "", fmt.Errorf("bad format for add-host: %q", val) } + + // Split the hostnames by semicolon and validate each one + nameList := strings.Split(names, ";") + for _, name := range nameList { + if len(name) == 0 { + return "", fmt.Errorf("hostname in add-host %q is empty", val) + } + } + if ip == etchosts.HostGateway { return val, nil } diff --git a/vendor/github.com/containers/podman/v5/libpod/container.go b/vendor/github.com/containers/podman/v5/libpod/container.go index e424afa9f..423443072 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container.go +++ b/vendor/github.com/containers/podman/v5/libpod/container.go @@ -1261,6 +1261,17 @@ func (c *Container) AutoRemove() bool { return spec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue } +// AutoRemoveImage indicates that the container will automatically remove the +// image it is using after it exits and is removed. +// Only allowed if AutoRemove is true. +func (c *Container) AutoRemoveImage() bool { + spec := c.config.Spec + if spec.Annotations == nil { + return false + } + return spec.Annotations[define.InspectAnnotationAutoremoveImage] == define.InspectResponseTrue +} + // Timezone returns the timezone configured inside the container. // Local means it has the same timezone as the host machine func (c *Container) Timezone() string { diff --git a/vendor/github.com/containers/podman/v5/libpod/container_api.go b/vendor/github.com/containers/podman/v5/libpod/container_api.go index 4cab79e25..cae6817ec 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_api.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_api.go @@ -9,7 +9,6 @@ import ( "io" "net/http" "os" - "sync" "time" "github.com/containers/common/pkg/resize" @@ -40,7 +39,10 @@ func (c *Container) Init(ctx context.Context, recursive bool) error { return err } } + return c.initUnlocked(ctx, recursive) +} +func (c *Container) initUnlocked(ctx context.Context, recursive bool) error { if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) { return fmt.Errorf("container %s has already been created in runtime: %w", c.ID(), define.ErrCtrStateInvalid) } @@ -115,7 +117,7 @@ func (c *Container) Start(ctx context.Context, recursive bool) (finalErr error) // Update updates the given container. // Either resource limits or restart policy can be updated. -// Either resourcs or restartPolicy must not be nil. +// Either resources or restartPolicy must not be nil. 
// If restartRetries is not nil, restartPolicy must be set and must be "on-failure". func (c *Container) Update(resources *spec.LinuxResources, restartPolicy *string, restartRetries *uint) error { if !c.batched { @@ -342,25 +344,36 @@ func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams * close(hijackDone) }() + locked := false if !c.batched { + locked = true c.lock.Lock() + defer func() { + if locked { + c.lock.Unlock() + } + }() if err := c.syncContainer(); err != nil { - c.lock.Unlock() - return err } - // We are NOT holding the lock for the duration of the function. - c.lock.Unlock() } - - if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) { - return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid) + // For Docker compatibility, we need to re-initialize containers in these states. + if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited, define.ContainerStateStopped) { + if err := c.initUnlocked(r.Context(), c.config.Pod != ""); err != nil { + return fmt.Errorf("preparing container %s for attach: %w", c.ID(), err) + } + } else if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) { + return fmt.Errorf("can only attach to created or running containers - currently in state %s: %w", c.state.State.String(), define.ErrCtrStateInvalid) } if !streamAttach && !streamLogs { return fmt.Errorf("must specify at least one of stream or logs: %w", define.ErrInvalidArg) } + // We are NOT holding the lock for the duration of the function. + locked = false + c.lock.Unlock() + logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID()) c.newContainerEvent(events.Attach) @@ -530,6 +543,8 @@ func (c *Container) Wait(ctx context.Context) (int32, error) { // WaitForExit blocks until the container exits and returns its exit code. The // argument is the interval at which checks the container's status. +// If the argument is less than or equal to 0 Nanoseconds a default interval is +// used. func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration) (int32, error) { id := c.ID() if !c.valid { @@ -541,119 +556,111 @@ func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration) } return -1, define.ErrCtrRemoved } - var conmonTimer time.Timer - conmonTimerSet := false - conmonPidFd := c.getConmonPidFd() - if conmonPidFd != -1 { - defer unix.Close(conmonPidFd) - } - conmonPidFdTriggered := false - - getExitCode := func() (bool, int32, error) { - containerRemoved := false - if !c.batched { - c.lock.Lock() - defer c.lock.Unlock() - } + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - if !errors.Is(err, define.ErrNoSuchCtr) { - return false, -1, err - } - containerRemoved = true - } - - // If conmon is not alive anymore set a timer to make sure - // we're returning even if conmon has forcefully been killed. - if !conmonTimerSet && !containerRemoved { - conmonAlive, err := c.ociRuntime.CheckConmonRunning(c) - switch { - case errors.Is(err, define.ErrNoSuchCtr): - // Container has been removed, so we assume the - // exit code is present in the DB. - containerRemoved = true - case err != nil: - return false, -1, err - case !conmonAlive: - // Give the exit code at most 20 seconds to - // show up in the DB. That should largely be - // enough for the cleanup process. 
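// The HTTPAttach rework above centralizes unlocking behind a "locked" flag: one
// deferred unlock covers every early-error return, and an explicit hand-off drops the
// lock before the long blocking hijack phase. The idiom reduced to its core:
func runUnlocked(mu *sync.Mutex, prepare func() error, blocking func() error) error {
	locked := true
	mu.Lock()
	defer func() {
		if locked {
			mu.Unlock() // still held: we failed during preparation
		}
	}()
	if err := prepare(); err != nil {
		return err
	}
	locked = false
	mu.Unlock() // deliberately NOT held across the blocking part
	return blocking()
}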
- timerDuration := time.Second * 20 - conmonTimer = *time.NewTimer(timerDuration) - conmonTimerSet = true - case conmonAlive: - // Continue waiting if conmon's still running. - return false, -1, nil - } - } - - timedout := "" - if !containerRemoved { - // If conmon is dead for more than $timerDuration or if the - // container has exited properly, try to look up the exit code. - select { - case <-conmonTimer.C: - logrus.Debugf("Exceeded conmon timeout waiting for container %s to exit", id) - timedout = " [exceeded conmon timeout waiting for container to exit]" - default: - switch c.state.State { - case define.ContainerStateExited, define.ContainerStateConfigured: - // Container exited, so we can look up the exit code. - case define.ContainerStateStopped: - // Continue looping unless the restart policy is always. - // In this case, the container would never transition to - // the exited state, so we need to look up the exit code. - if c.config.RestartPolicy != define.RestartPolicyAlways { - return false, -1, nil - } - default: - // Continue looping - return false, -1, nil + if errors.Is(err, define.ErrNoSuchCtr) { + // if the container is not valid at this point as it was deleted, + // check if the exit code was recorded in the db. + exitCode, err := c.runtime.state.GetContainerExitCode(id) + if err == nil { + return exitCode, nil } } + return -1, err } + } + conmonPID := c.state.ConmonPID + // conmonPID == 0 means container is not running + if conmonPID == 0 { exitCode, err := c.runtime.state.GetContainerExitCode(id) if err != nil { if errors.Is(err, define.ErrNoSuchExitCode) { // If the container is configured or created, we must assume it never ran. if c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated) { - return true, 0, nil + return 0, nil } } - return true, -1, fmt.Errorf("%w (container in state %s)%s", err, c.state.State, timedout) + return -1, err } + return exitCode, nil + } + + conmonPidFd := c.getConmonPidFd() + if conmonPidFd > -1 { + defer unix.Close(conmonPidFd) + } - return true, exitCode, nil + if pollInterval <= 0 { + pollInterval = DefaultWaitInterval } - for { - hasExited, exitCode, err := getExitCode() - if hasExited { - return exitCode, err + // we cannot wait locked as we would hold the lock forever, so we unlock and then lock again + c.lock.Unlock() + err := waitForConmonExit(ctx, conmonPID, conmonPidFd, pollInterval) + c.lock.Lock() + if err != nil { + return -1, fmt.Errorf("failed to wait for conmon to exit: %w", err) + } + + // we locked again so we must sync the state + if err := c.syncContainer(); err != nil { + if errors.Is(err, define.ErrNoSuchCtr) { + // if the container is not valid at this point as it was deleted, + // check if the exit code was recorded in the db. 
+ exitCode, err := c.runtime.state.GetContainerExitCode(id) + if err == nil { + return exitCode, nil + } } - if err != nil { - return -1, err + return -1, err + } + + // syncContainer already did the exit file read so nothing extra for us to do here + return c.runtime.state.GetContainerExitCode(id) +} + +func waitForConmonExit(ctx context.Context, conmonPID, conmonPidFd int, pollInterval time.Duration) error { + if conmonPidFd > -1 { + for { + fds := []unix.PollFd{{Fd: int32(conmonPidFd), Events: unix.POLLIN}} + if n, err := unix.Poll(fds, int(pollInterval.Milliseconds())); err != nil { + if err == unix.EINTR { + continue + } + return err + } else if n == 0 { + // n == 0 means timeout + select { + case <-ctx.Done(): + return define.ErrCanceled + default: + // context not done, wait again + continue + } + } + return nil + } + } + // no pidfd support, we must poll the pid + for { + if err := unix.Kill(conmonPID, 0); err != nil { + if err == unix.ESRCH { + break + } + return err } select { case <-ctx.Done(): - return -1, fmt.Errorf("waiting for exit code of container %s canceled", id) - default: - if conmonPidFd != -1 && !conmonPidFdTriggered { - // If possible (pidfd works), the first cycle we block until conmon dies - // If this happens, and we fall back to the old poll delay - // There is a deadlock in the cleanup code for "play kube" which causes - // conmon to not exit, so unfortunately we have to use the poll interval - // timeout here to avoid hanging. - fds := []unix.PollFd{{Fd: int32(conmonPidFd), Events: unix.POLLIN}} - _, _ = unix.Poll(fds, int(pollInterval.Milliseconds())) - conmonPidFdTriggered = true - } else { - time.Sleep(pollInterval) - } + return define.ErrCanceled + case <-time.After(pollInterval): } } + return nil } type waitResult struct { @@ -706,27 +713,27 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou } } - var wg sync.WaitGroup - if waitForExit { - wg.Add(1) go func() { - defer wg.Done() - code, err := c.WaitForExit(ctx, waitTimeout) trySend(code, err) }() } if len(wantedStates) > 0 || len(wantedHealthStates) > 0 { - wg.Add(1) go func() { - defer wg.Done() stoppedCount := 0 for { if len(wantedStates) > 0 { state, err := c.State() if err != nil { + // If the we wait for removing and the container is removed do not return this as error. + // This allows callers to actually wait for the ctr to be removed. + if wantedStates[define.ContainerStateRemoving] && + (errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved)) { + trySend(-1, nil) + return + } trySend(-1, err) return } @@ -784,13 +791,14 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou case <-ctx.Done(): result = waitResult{-1, define.ErrCanceled} } - wg.Wait() return result.code, result.err } // Cleanup unmounts all mount points in container and cleans up container storage -// It also cleans up the network stack -func (c *Container) Cleanup(ctx context.Context) error { +// It also cleans up the network stack. +// onlyStopped is set by the podman container cleanup to ensure we only cleanup a stopped container, +// all other states mean another process already called cleanup before us which is fine in such cases. 
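// When no pidfd is available, the fallback loop above uses the classic "signal 0"
// liveness probe: unix.Kill(pid, 0) delivers nothing but reports whether the process
// still exists. The probe in isolation (import golang.org/x/sys/unix):
func pidAlive(pid int) (bool, error) {
	switch err := unix.Kill(pid, 0); err {
	case nil:
		return true, nil
	case unix.ESRCH:
		return false, nil // no such process: conmon has exited
	default:
		// e.g. EPERM would mean the process exists but is not ours.
		return false, err
	}
}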
+func (c *Container) Cleanup(ctx context.Context, onlyStopped bool) error { if !c.batched { c.lock.Lock() defer c.lock.Unlock() @@ -812,6 +820,9 @@ func (c *Container) Cleanup(ctx context.Context) error { if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) { return fmt.Errorf("container %s is running or paused, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid) } + if onlyStopped && !c.ensureState(define.ContainerStateStopped) { + return fmt.Errorf("container %s is not stopped and only cleanup for a stopped container was requested: %w", c.ID(), define.ErrCtrStateInvalid) + } // if the container was not created in the oci runtime or was already cleaned up, then do nothing if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) { diff --git a/vendor/github.com/containers/podman/v5/libpod/container_config.go b/vendor/github.com/containers/podman/v5/libpod/container_config.go index 8c4e0176c..5ed80382a 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_config.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_config.go @@ -413,6 +413,14 @@ type ContainerMiscConfig struct { HealthCheckConfig *manifest.Schema2HealthConfig `json:"healthcheck"` // HealthCheckOnFailureAction defines an action to take once the container turns unhealthy. HealthCheckOnFailureAction define.HealthCheckOnFailureAction `json:"healthcheck_on_failure_action"` + // HealthLogDestination defines the destination where the log is stored + HealthLogDestination string `json:"healthLogDestination,omitempty"` + // HealthMaxLogCount is maximum number of attempts in the HealthCheck log file. + // ('0' value means an infinite number of attempts in the log file) + HealthMaxLogCount uint `json:"healthMaxLogCount,omitempty"` + // HealthMaxLogSize is the maximum length in characters of stored HealthCheck log + // ("0" value means an infinite log length) + HealthMaxLogSize uint `json:"healthMaxLogSize,omitempty"` // StartupHealthCheckConfig is the configuration of the startup // healthcheck for the container. This will run before the regular HC // runs, and when it passes the regular HC will be activated. diff --git a/vendor/github.com/containers/podman/v5/libpod/container_inspect.go b/vendor/github.com/containers/podman/v5/libpod/container_inspect.go index ef4bac14e..a94175407 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_inspect.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_inspect.go @@ -195,7 +195,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver // inspect status should be set to nil. if c.config.HealthCheckConfig != nil && !(len(c.config.HealthCheckConfig.Test) == 1 && c.config.HealthCheckConfig.Test[0] == "NONE") { // This container has a healthcheck defined in it; we need to add its state - healthCheckState, err := c.getHealthCheckLog() + healthCheckState, err := c.readHealthCheckLog() if err != nil { // An error here is not considered fatal; no health state will be displayed logrus.Error(err) @@ -211,6 +211,11 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver return nil, err } data.NetworkSettings = networkConfig + // Ports in NetworkSettings includes exposed ports for network modes that are not host, + // and not container. 
+ if !(c.config.NetNsCtr != "" || c.NetworkMode() == "host") { + addInspectPortsExpose(c.config.ExposedPorts, data.NetworkSettings.Ports) + } inspectConfig := c.generateInspectContainerConfig(ctrSpec) data.Config = inspectConfig @@ -253,6 +258,7 @@ func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes mountStruct.Type = "volume" mountStruct.Destination = volume.Dest mountStruct.Name = volume.Name + mountStruct.SubPath = volume.SubPath // For src and driver, we need to look up the named // volume. @@ -279,6 +285,7 @@ func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes mountStruct.Destination = volume.Dest mountStruct.Source = volume.Source mountStruct.RW = volume.ReadWrite + mountStruct.SubPath = volume.SubPath inspectMounts = append(inspectMounts, mountStruct) } @@ -372,6 +379,20 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp if spec.Process != nil { ctrConfig.Tty = spec.Process.Terminal ctrConfig.Env = append([]string{}, spec.Process.Env...) + + // find all secrets mounted as env variables and hide their values; + // the inspect command should not display them + envSecrets := c.config.EnvSecrets + for envIndex, envValue := range ctrConfig.Env { + // env variables come in the style `name=value` + envName := strings.Split(envValue, "=")[0] + + envSecret, ok := envSecrets[envName] + if ok { + ctrConfig.Env[envIndex] = envSecret.Name + "=*******" + } + } + ctrConfig.WorkingDir = spec.Process.Cwd } @@ -408,10 +429,18 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.StopSignal = signal.ToDockerFormat(c.config.StopSignal) // TODO: should JSON deep copy this to ensure internal pointers don't // leak. + ctrConfig.StartupHealthCheck = c.config.StartupHealthCheckConfig + ctrConfig.Healthcheck = c.config.HealthCheckConfig ctrConfig.HealthcheckOnFailureAction = c.config.HealthCheckOnFailureAction.String() + ctrConfig.HealthLogDestination = c.config.HealthLogDestination + + ctrConfig.HealthMaxLogCount = c.config.HealthMaxLogCount + + ctrConfig.HealthMaxLogSize = c.config.HealthMaxLogSize + ctrConfig.CreateCommand = c.config.CreateCommand ctrConfig.Timezone = c.config.Timezone @@ -438,6 +467,25 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.SdNotifyMode = c.config.SdNotifyMode ctrConfig.SdNotifySocket = c.config.SdNotifySocket + + // Exposed ports consist of all exposed ports and all port mappings for + // this container. They do *NOT* follow another container's ports even + // if we share its network namespace.
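The secret masking added to generateInspectContainerConfig above is self-contained enough to exercise directly. A minimal sketch (the shape of the EnvSecrets lookup is simplified to a name set; everything else is illustrative); the comment at the end of the hunk above is implemented by the code that follows this sketch:

package main

import (
	"fmt"
	"strings"
)

// maskSecretEnv replaces the value of any environment entry whose name is
// a known secret, mirroring the `name=*******` masking above.
func maskSecretEnv(env []string, secretNames map[string]bool) []string {
	for i, kv := range env {
		// env variables come in the form `name=value`
		name := strings.SplitN(kv, "=", 2)[0]
		if secretNames[name] {
			env[i] = name + "=*******"
		}
	}
	return env
}

func main() {
	env := []string{"PATH=/usr/bin", "DB_PASSWORD=hunter2"}
	fmt.Println(maskSecretEnv(env, map[string]bool{"DB_PASSWORD": true}))
	// Output: [PATH=/usr/bin DB_PASSWORD=*******]
}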
+ exposedPorts := make(map[string]struct{}) + for port, protocols := range c.config.ExposedPorts { + for _, proto := range protocols { + exposedPorts[fmt.Sprintf("%d/%s", port, proto)] = struct{}{} + } + } + for _, mapping := range c.config.PortMappings { + for i := range mapping.Range { + exposedPorts[fmt.Sprintf("%d/%s", mapping.ContainerPort+i, mapping.Protocol)] = struct{}{} + } + } + if len(exposedPorts) > 0 { + ctrConfig.ExposedPorts = exposedPorts + } + return ctrConfig } @@ -517,6 +565,9 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named if ctrSpec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue { hostConfig.AutoRemove = true } + if ctrSpec.Annotations[define.InspectAnnotationAutoremoveImage] == define.InspectResponseTrue { + hostConfig.AutoRemoveImage = true + } if ctrs, ok := ctrSpec.Annotations[define.VolumesFromAnnotation]; ok { hostConfig.VolumesFrom = strings.Split(ctrs, ";") } diff --git a/vendor/github.com/containers/podman/v5/libpod/container_inspect_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/container_inspect_freebsd.go index a913b7e7d..a35b6754b 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_inspect_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_inspect_freebsd.go @@ -15,5 +15,14 @@ func (c *Container) platformInspectContainerHostConfig(ctrSpec *spec.Spec, hostC // UTS namespace mode hostConfig.UTSMode = c.NamespaceMode(spec.UTSNamespace, ctrSpec) + // Devices + // Do not include if privileged - assumed that all devices will be + // included. + var err error + hostConfig.Devices, err = c.GetDevices(hostConfig.Privileged, *ctrSpec, map[string]string{}) + if err != nil { + return err + } + return nil } diff --git a/vendor/github.com/containers/podman/v5/libpod/container_inspect_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_inspect_linux.go index 909be3d31..c407a7ebe 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_inspect_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_inspect_linux.go @@ -10,11 +10,9 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/util" + "github.com/moby/sys/capability" spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/runtime-tools/generate" - "github.com/opencontainers/runtime-tools/validate/capabilities" "github.com/sirupsen/logrus" - "github.com/syndtr/gocapability/capability" ) func (c *Container) platformInspectContainerHostConfig(ctrSpec *spec.Spec, hostConfig *define.InspectContainerHostConfig) error { @@ -152,15 +150,8 @@ func (c *Container) platformInspectContainerHostConfig(ctrSpec *spec.Spec, hostC boundingCaps[cap] = true } } else { - g, err := generate.New("linux") - if err != nil { - return err - } // If we are privileged, use all caps. 
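The ExposedPorts set built at the top of this hunk uses the usual Go set idiom, map[string]struct{}, keyed Docker-style as "port/proto", and expands port ranges from the mappings. A runnable approximation with types simplified from libpod's port-mapping structures (names here are stand-ins):

package main

import "fmt"

type portMapping struct {
	ContainerPort uint16
	Protocol      string
	Range         uint16 // number of consecutive ports, >= 1
}

func exposedPortSet(exposed map[uint16][]string, mappings []portMapping) map[string]struct{} {
	set := make(map[string]struct{})
	for port, protocols := range exposed {
		for _, proto := range protocols {
			set[fmt.Sprintf("%d/%s", port, proto)] = struct{}{}
		}
	}
	for _, m := range mappings {
		for i := uint16(0); i < m.Range; i++ {
			set[fmt.Sprintf("%d/%s", m.ContainerPort+i, m.Protocol)] = struct{}{}
		}
	}
	return set
}

func main() {
	exposed := map[uint16][]string{80: {"tcp"}}
	mappings := []portMapping{{ContainerPort: 8080, Protocol: "tcp", Range: 2}}
	fmt.Println(exposedPortSet(exposed, mappings))
	// Output: map[80/tcp:{} 8080/tcp:{} 8081/tcp:{}]
}

The struct{} value type costs nothing and the map also matches the JSON shape inspect emits ({"8080/tcp": {}}).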
- for _, cap := range capability.List() { - if g.HostSpecific && cap > capabilities.LastCap() { - continue - } + for _, cap := range capability.ListKnown() { boundingCaps[fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))] = true } } diff --git a/vendor/github.com/containers/podman/v5/libpod/container_internal.go b/vendor/github.com/containers/podman/v5/libpod/container_internal.go index 3f354c5a9..c7efd18e4 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_internal.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal.go @@ -1123,10 +1123,9 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { // bugzilla.redhat.com/show_bug.cgi?id=2144754: // In case of a restart, make sure to remove the healthcheck log to // have a clean state. - if path := c.healthCheckLogPath(); path != "" { - if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { - logrus.Error(err) - } + err = c.writeHealthCheckLog(define.HealthCheckResults{Status: define.HealthCheckReset}) + if err != nil { + return err } if err := c.save(); err != nil { @@ -1925,6 +1924,11 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) getOptions := copier.GetOptions{ KeepDirectoryNames: false, } + // If the volume is idmapped, we need to "undo" the idmapping + if slices.Contains(v.Options, "idmap") { + getOptions.UIDMap = c.config.IDMappings.UIDMap + getOptions.GIDMap = c.config.IDMappings.GIDMap + } errChan <- copier.Get(srcDir, "", getOptions, []string{"/."}, writer) }() @@ -1932,6 +1936,8 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) // the volume. copyOpts := copier.PutOptions{} if err := copier.Put(volMount, "", copyOpts, reader); err != nil { + // consume the reader, otherwise the producer goroutine will block + _, _ = io.Copy(io.Discard, reader) err2 := <-errChan if err2 != nil { logrus.Errorf("Streaming contents of container %s directory for volume copy-up: %v", c.ID(), err2) @@ -2049,6 +2055,11 @@ func (c *Container) cleanup(ctx context.Context) error { logrus.Debugf("Cleaning up container %s", c.ID()) + // Ensure we are not killed halfway through cleanup, + // which can leave us in a bad state. + shutdown.Inhibit() + defer shutdown.Uninhibit() + // Remove healthcheck unit/timer file if it execs if c.config.HealthCheckConfig != nil { if err := c.removeTransientFiles(ctx, @@ -2151,6 +2162,11 @@ func (c *Container) stopPodIfNeeded(ctx context.Context) error { return nil } + // Never try to stop the pod when an init container stops + if c.IsInitCtr() { + return nil + } + pod, err := c.runtime.state.Pod(c.config.Pod) if err != nil { return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), c.config.Pod, err) } @@ -2210,7 +2226,6 @@ func (c *Container) postDeleteHooks(ctx context.Context) error { return err } for i, hook := range extensionHooks { - hook := hook logrus.Debugf("container %s: invoke poststop hook %d, path %s", c.ID(), i, hook.Path) var stderr, stdout bytes.Buffer hookErr, err := exec.RunWithOptions( @@ -2377,7 +2392,7 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s // the container's mountpoint directly from the storage. // Otherwise, it returns an intermediate mountpoint that is accessible to anyone.
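The io.Copy(io.Discard, reader) added in the copy-up hunk above fixes a classic pipe deadlock: the producer goroutine blocks writing into a full io.Pipe once the consumer bails out early, so its error can never be received. A compact reproduction with a simulated producer and failure (all names illustrative):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

func main() {
	reader, writer := io.Pipe()
	errChan := make(chan error, 1)

	// Producer: stands in for the copier.Get goroutine streaming a
	// directory tree into the pipe.
	go func() {
		_, err := io.Copy(writer, strings.NewReader(strings.Repeat("x", 1<<20)))
		writer.Close() // deliver EOF to the reader before reporting
		errChan <- err
	}()

	// Consumer: stands in for copier.Put failing part-way through.
	buf := make([]byte, 4)
	_, _ = reader.Read(buf)
	putErr := errors.New("simulated Put failure")

	// Without this drain, the producer would block forever on the full
	// pipe and the receive from errChan below would deadlock.
	_, _ = io.Copy(io.Discard, reader)
	fmt.Println("put:", putErr, "| get:", <-errChan)
}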
func (c *Container) getRootPathForOCI() (string, error) { - if hasCurrentUserMapped(c) { + if hasCurrentUserMapped(c) || c.config.RootfsMapping != nil { return c.state.Mountpoint, nil } return c.getIntermediateMountpointUser() diff --git a/vendor/github.com/containers/podman/v5/libpod/container_internal_common.go b/vendor/github.com/containers/podman/v5/libpod/container_internal_common.go index f95644a8a..3000aeafd 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_internal_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal_common.go @@ -671,18 +671,18 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc return nil, nil, err } } - if isRootless { - for _, rlimit := range c.config.Spec.Process.Rlimits { - if rlimit.Type == "RLIMIT_NOFILE" { - nofileSet = true - } - if rlimit.Type == "RLIMIT_NPROC" { - nprocSet = true - } + for _, rlimit := range c.config.Spec.Process.Rlimits { + if rlimit.Type == "RLIMIT_NOFILE" { + nofileSet = true + } + if rlimit.Type == "RLIMIT_NPROC" { + nprocSet = true } - if !nofileSet { - max := rlimT(define.RLimitDefaultValue) - current := rlimT(define.RLimitDefaultValue) + } + if !nofileSet { + max := rlimT(define.RLimitDefaultValue) + current := rlimT(define.RLimitDefaultValue) + if isRootless { var rlimit unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit); err != nil { logrus.Warnf("Failed to return RLIMIT_NOFILE ulimit %q", err) @@ -693,11 +693,13 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc if rlimT(rlimit.Max) < max { max = rlimT(rlimit.Max) } - g.AddProcessRlimits("RLIMIT_NOFILE", uint64(max), uint64(current)) } - if !nprocSet { - max := rlimT(define.RLimitDefaultValue) - current := rlimT(define.RLimitDefaultValue) + g.AddProcessRlimits("RLIMIT_NOFILE", uint64(max), uint64(current)) + } + if !nprocSet { + max := rlimT(define.RLimitDefaultValue) + current := rlimT(define.RLimitDefaultValue) + if isRootless { var rlimit unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlimit); err != nil { logrus.Warnf("Failed to return RLIMIT_NPROC ulimit %q", err) @@ -708,8 +710,8 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc if rlimT(rlimit.Max) < max { max = rlimT(rlimit.Max) } - g.AddProcessRlimits("RLIMIT_NPROC", uint64(max), uint64(current)) } + g.AddProcessRlimits("RLIMIT_NPROC", uint64(max), uint64(current)) } c.addMaskedPaths(&g) @@ -2139,11 +2141,13 @@ func (c *Container) addResolvConf() error { if len(networkNameServers) == 0 || networkBackend != string(types.Netavark) { keepHostServers = true } - // first add the nameservers from the networks status - nameservers = networkNameServers - - // pasta and slirp4netns have a built in DNS forwarder. - nameservers = c.addSpecialDNS(nameservers) + if len(networkNameServers) > 0 { + // add the nameservers from the networks status + nameservers = networkNameServers + } else { + // pasta and slirp4netns have a built in DNS forwarder. 
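The rlimit refactor in the generateSpec hunk above changes the defaults to be applied for both root and rootless runs, with the clamp-to-current-limits step kept rootless-only. Roughly, as a stand-alone sketch (the default value is a stand-in for define.RLimitDefaultValue, and the freebsd rlimT indirection is dropped):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

const rlimitDefault = uint64(1048576) // stand-in for define.RLimitDefaultValue

// nofileLimits returns the soft/hard RLIMIT_NOFILE values to put in the
// spec: the fixed default, clamped to what the current (rootless) user
// can actually raise them to.
func nofileLimits(rootless bool) (current, max uint64) {
	current, max = rlimitDefault, rlimitDefault
	if rootless {
		var rl unix.Rlimit
		if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
			fmt.Fprintln(os.Stderr, "getrlimit:", err)
			return
		}
		if rl.Cur < current {
			current = rl.Cur
		}
		if rl.Max < max {
			max = rl.Max
		}
	}
	return
}

func main() {
	cur, max := nofileLimits(os.Geteuid() != 0)
	fmt.Printf("RLIMIT_NOFILE soft=%d hard=%d\n", cur, max)
}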
+ nameservers = c.addSpecialDNS(nameservers) + } } // Set DNS search domains @@ -2306,8 +2310,13 @@ func (c *Container) addHosts() error { } var exclude []net.IP + var preferIP string if c.pastaResult != nil { exclude = c.pastaResult.IPAddresses + if len(c.pastaResult.MapGuestAddrIPs) > 0 { + // we used --map-guest-addr to setup pasta so prefer this address + preferIP = c.pastaResult.MapGuestAddrIPs[0] + } } else if c.config.NetMode.IsBridge() { // When running rootless we have to check the rootless netns ip addresses // to not assign a ip that is already used in the rootless netns as it would @@ -2316,16 +2325,27 @@ func (c *Container) addHosts() error { info, err := c.runtime.network.RootlessNetnsInfo() if err == nil { exclude = info.IPAddresses + if len(info.MapGuestIps) > 0 { + // we used --map-guest-addr to setup pasta so prefer this address + preferIP = info.MapGuestIps[0] + } } } + hostContainersInternalIP := etchosts.GetHostContainersInternalIP(etchosts.HostContainersInternalOptions{ + Conf: c.runtime.config, + NetStatus: c.state.NetworkStatus, + NetworkInterface: c.runtime.network, + Exclude: exclude, + PreferIP: preferIP, + }) + return etchosts.New(&etchosts.Params{ - BaseFile: baseHostFile, - ExtraHosts: c.config.HostAdd, - ContainerIPs: containerIPsEntries, - HostContainersInternalIP: etchosts.GetHostContainersInternalIPExcluding( - c.runtime.config, c.state.NetworkStatus, c.runtime.network, exclude), - TargetFile: targetFile, + BaseFile: baseHostFile, + ExtraHosts: c.config.HostAdd, + ContainerIPs: containerIPsEntries, + HostContainersInternalIP: hostContainersInternalIP, + TargetFile: targetFile, }) } diff --git a/vendor/github.com/containers/podman/v5/libpod/container_internal_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/container_internal_freebsd.go index ac9892ef2..4635da064 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_internal_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal_freebsd.go @@ -146,15 +146,18 @@ func (c *Container) cleanupNetwork() error { } // Stop the container's network namespace (if it has one) - if err := c.runtime.teardownNetNS(c); err != nil { - logrus.Errorf("Unable to cleanup network for container %s: %q", c.ID(), err) - } + neterr := c.runtime.teardownNetNS(c) - if c.valid { - return c.save() + // always save even when there was an error + err = c.save() + if err != nil { + if neterr != nil { + logrus.Errorf("Unable to clean up network for container %s: %q", c.ID(), neterr) + } + return err } - return nil + return neterr } // reloadNetwork reloads the network for the given container, recreating diff --git a/vendor/github.com/containers/podman/v5/libpod/container_internal_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_internal_linux.go index c479a0255..38119a604 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_internal_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal_linux.go @@ -19,6 +19,7 @@ import ( "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/shutdown" "github.com/containers/podman/v5/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" @@ -67,6 +68,9 @@ func (c *Container) prepare() error { tmpStateLock sync.Mutex ) + shutdown.Inhibit() + defer shutdown.Uninhibit() + wg.Add(2) go func() { @@ -187,18 +191,20 @@ 
func (c *Container) cleanupNetwork() error { } // Stop the container's network namespace (if it has one) - if err := c.runtime.teardownNetNS(c); err != nil { - logrus.Errorf("Unable to clean up network for container %s: %q", c.ID(), err) - } - + neterr := c.runtime.teardownNetNS(c) c.state.NetNS = "" c.state.NetworkStatus = nil - if c.valid { - return c.save() + // always save even when there was an error + err = c.save() + if err != nil { + if neterr != nil { + logrus.Errorf("Unable to clean up network for container %s: %q", c.ID(), neterr) + } + return err } - return nil + return neterr } // reloadNetwork reloads the network for the given container, recreating @@ -615,12 +621,16 @@ func (c *Container) setCgroupsPath(g *generate.Generator) error { // addSpecialDNS adds special dns servers for slirp4netns and pasta func (c *Container) addSpecialDNS(nameservers []string) []string { - if c.pastaResult != nil { + switch { + case c.config.NetMode.IsBridge(): + info, err := c.runtime.network.RootlessNetnsInfo() + if err == nil { + nameservers = append(nameservers, info.DnsForwardIps...) + } + case c.pastaResult != nil: nameservers = append(nameservers, c.pastaResult.DNSForwardIPs...) - } - - // slirp4netns has a built in DNS forwarder. - if c.config.NetMode.IsSlirp4netns() { + case c.config.NetMode.IsSlirp4netns(): + // slirp4netns has a built in DNS forwarder. slirp4netnsDNS, err := slirp4netns.GetDNS(c.slirp4netnsSubnet) if err != nil { logrus.Warn("Failed to determine Slirp4netns DNS: ", err.Error()) @@ -692,16 +702,14 @@ func (c *Container) makePlatformBindMounts() error { } func (c *Container) getConmonPidFd() int { - if c.state.ConmonPID != 0 { - // Track lifetime of conmon precisely using pidfd_open + poll. - // There are many cases for this to fail, for instance conmon is dead - // or pidfd_open is not supported (pre linux 5.3), so fall back to the - // traditional loop with poll + sleep - if fd, err := unix.PidfdOpen(c.state.ConmonPID, 0); err == nil { - return fd - } else if err != unix.ENOSYS && err != unix.ESRCH { - logrus.Debugf("PidfdOpen(%d) failed: %v", c.state.ConmonPID, err) - } + // Track lifetime of conmon precisely using pidfd_open + poll. 
+ // There are many cases for this to fail, for instance conmon is dead + // or pidfd_open is not supported (pre linux 5.3), so fall back to the + // traditional loop with poll + sleep + if fd, err := unix.PidfdOpen(c.state.ConmonPID, 0); err == nil { + return fd + } else if err != unix.ENOSYS && err != unix.ESRCH { + logrus.Debugf("PidfdOpen(%d) failed: %v", c.state.ConmonPID, err) } return -1 } diff --git a/vendor/github.com/containers/podman/v5/libpod/container_log.go b/vendor/github.com/containers/podman/v5/libpod/container_log.go index ea76c7ea3..91c0a50cc 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_log.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_log.go @@ -10,7 +10,6 @@ import ( "time" "github.com/containers/podman/v5/libpod/define" - "github.com/containers/podman/v5/libpod/events" "github.com/containers/podman/v5/libpod/logs" systemdDefine "github.com/containers/podman/v5/pkg/systemd/define" "github.com/nxadm/tail" @@ -139,20 +138,10 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption // The container is running, so we need to wait until the container exited go func() { - eventChannel := make(chan *events.Event) - eventOptions := events.ReadOptions{ - EventChannel: eventChannel, - Filters: []string{"event=died", "container=" + c.ID()}, - Stream: true, + _, err = c.Wait(ctx) + if err != nil && !errors.Is(err, define.ErrNoSuchCtr) { + logrus.Errorf("Waiting for container to exit: %v", err) } - go func() { - if err := c.runtime.Events(ctx, eventOptions); err != nil { - logrus.Errorf("Waiting for container to exit: %v", err) - } - }() - // Now wait for the died event and signal to finish - // reading the log until EOF. - <-eventChannel // Make sure to wait at least for the poll duration // before stopping the file logger (see #10675). 
time.Sleep(watch.POLL_DURATION) diff --git a/vendor/github.com/containers/podman/v5/libpod/container_validate.go b/vendor/github.com/containers/podman/v5/libpod/container_validate.go index 0c2b604dd..29c71077f 100644 --- a/vendor/github.com/containers/podman/v5/libpod/container_validate.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_validate.go @@ -4,6 +4,7 @@ package libpod import ( "fmt" + "strings" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/pkg/shortnames" @@ -158,11 +159,30 @@ func (c *Container) validate() error { } } + // Autoremoving image requires autoremoving the associated container + if c.config.Spec.Annotations != nil { + if c.config.Spec.Annotations[define.InspectAnnotationAutoremoveImage] == define.InspectResponseTrue { + if c.config.Spec.Annotations[define.InspectAnnotationAutoremove] != define.InspectResponseTrue { + return fmt.Errorf("autoremoving image requires autoremoving the container: %w", define.ErrInvalidArg) + } + if c.config.Rootfs != "" { + return fmt.Errorf("autoremoving image is not possible when a rootfs is in use: %w", define.ErrInvalidArg) + } + } + } + // Cannot set startup HC without a healthcheck if c.config.HealthCheckConfig == nil && c.config.StartupHealthCheckConfig != nil { return fmt.Errorf("cannot set a startup healthcheck when there is no regular healthcheck: %w", define.ErrInvalidArg) } + // Ensure all ports list a single protocol + for _, p := range c.config.PortMappings { + if strings.Contains(p.Protocol, ",") { + return fmt.Errorf("each port mapping must define a single protocol, got a comma-separated list for container port %d (protocols requested %q): %w", p.ContainerPort, p.Protocol, define.ErrInvalidArg) + } + } + return nil } diff --git a/vendor/github.com/containers/podman/v5/libpod/define/annotations.go b/vendor/github.com/containers/podman/v5/libpod/define/annotations.go index ac1956f56..e1da8a157 100644 --- a/vendor/github.com/containers/podman/v5/libpod/define/annotations.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/annotations.go @@ -18,6 +18,12 @@ const ( // the two supported boolean values (InspectResponseTrue and // InspectResponseFalse) it will be used in the output of Inspect(). InspectAnnotationAutoremove = "io.podman.annotations.autoremove" + // InspectAnnotationAutoremoveImage is used by Inspect to identify + // containers which will automatically remove the image used by the + // container. If an annotation with this key is found in the OCI spec and + // is one of the two supported boolean values (InspectResponseTrue and + // InspectResponseFalse) it will be used in the output of Inspect(). + InspectAnnotationAutoremoveImage = "io.podman.annotations.autoremove-image" // InspectAnnotationPrivileged is used by Inspect to identify containers // which are privileged (IE, running with elevated privileges). 
// It is expected to be a boolean, populated by one of diff --git a/vendor/github.com/containers/podman/v5/libpod/define/container.go b/vendor/github.com/containers/podman/v5/libpod/define/container.go index ce7605992..5dd5465df 100644 --- a/vendor/github.com/containers/podman/v5/libpod/define/container.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/container.go @@ -61,4 +61,6 @@ const ( K8sKindDeployment = "deployment" // A DaemonSet kube yaml spec K8sKindDaemonSet = "daemonset" + // a Job kube yaml spec + K8sKindJob = "job" ) diff --git a/vendor/github.com/containers/podman/v5/libpod/define/container_inspect.go b/vendor/github.com/containers/podman/v5/libpod/define/container_inspect.go index 240b09d21..b4352554b 100644 --- a/vendor/github.com/containers/podman/v5/libpod/define/container_inspect.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/container_inspect.go @@ -57,10 +57,20 @@ type InspectContainerConfig struct { Annotations map[string]string `json:"Annotations"` // Container stop signal StopSignal string `json:"StopSignal"` + // Configured startup healthcheck for the container + StartupHealthCheck *StartupHealthCheck `json:"StartupHealthCheck,omitempty"` // Configured healthcheck for the container Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` // HealthcheckOnFailureAction defines an action to take once the container turns unhealthy. HealthcheckOnFailureAction string `json:"HealthcheckOnFailureAction,omitempty"` + // HealthLogDestination defines the destination where the log is stored + HealthLogDestination string `json:"HealthLogDestination,omitempty"` + // HealthMaxLogCount is maximum number of attempts in the HealthCheck log file. + // ('0' value means an infinite number of attempts in the log file) + HealthMaxLogCount uint `json:"HealthcheckMaxLogCount,omitempty"` + // HealthMaxLogSize is the maximum length in characters of stored HealthCheck log + // ("0" value means an infinite log length) + HealthMaxLogSize uint `json:"HealthcheckMaxLogSize,omitempty"` // CreateCommand is the full command plus arguments of the process the // container has been created with. CreateCommand []string `json:"CreateCommand,omitempty"` @@ -89,6 +99,8 @@ type InspectContainerConfig struct { SdNotifyMode string `json:"sdNotifyMode,omitempty"` // SdNotifySocket is the NOTIFY_SOCKET in use by/configured for the container. SdNotifySocket string `json:"sdNotifySocket,omitempty"` + // ExposedPorts includes ports the container has exposed. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` // V4PodmanCompatMarshal indicates that the json marshaller should // use the old v4 inspect format to keep API compatibility. @@ -273,6 +285,9 @@ type InspectMount struct { // Mount propagation for the mount. Can be empty if not specified, but // is always printed - no omitempty. Propagation string `json:"Propagation"` + // SubPath object from the volume. Specified as a path within + // the source volume to be mounted at the Destination. + SubPath string `json:"SubPath,omitempty"` } // InspectContainerState provides a detailed record of a container's current @@ -390,6 +405,11 @@ type InspectContainerHostConfig struct { // It is not handled directly within libpod and is stored in an // annotation. AutoRemove bool `json:"AutoRemove"` + // AutoRemoveImage is whether the container's image will be + // automatically removed on exiting. + // It is not handled directly within libpod and is stored in an + // annotation. 
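The single-protocol check added in container_validate.go above is easy to state on its own. A small sketch with stand-in types (errInvalidArg plays the role of define.ErrInvalidArg):

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errInvalidArg = errors.New("invalid argument") // stand-in for define.ErrInvalidArg

type portMapping struct {
	ContainerPort uint16
	Protocol      string
}

// validatePorts rejects comma-separated protocol lists: each mapping must
// name exactly one protocol.
func validatePorts(mappings []portMapping) error {
	for _, p := range mappings {
		if strings.Contains(p.Protocol, ",") {
			return fmt.Errorf("each port mapping must define a single protocol, got %q for container port %d: %w",
				p.Protocol, p.ContainerPort, errInvalidArg)
		}
	}
	return nil
}

func main() {
	fmt.Println(validatePorts([]portMapping{{8080, "tcp,udp"}})) // error
	fmt.Println(validatePorts([]portMapping{{8080, "tcp"}}))     // <nil>
}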
+ AutoRemoveImage bool `json:"AutoRemoveImage"` // Annotations are provided to the runtime when the container is // started. Annotations map[string]string `json:"Annotations"` diff --git a/vendor/github.com/containers/podman/v5/libpod/define/healthchecks.go b/vendor/github.com/containers/podman/v5/libpod/define/healthchecks.go index 15ea79fc2..4fec27755 100644 --- a/vendor/github.com/containers/podman/v5/libpod/define/healthchecks.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/healthchecks.go @@ -16,6 +16,8 @@ const ( // and the start-period (time allowed for the container to start and application // to be running) expires. HealthCheckStarting string = "starting" + // HealthCheckReset describes reset of HealthCheck logs + HealthCheckReset string = "reset" ) // HealthCheckStatus represents the current state of a container @@ -56,8 +58,16 @@ const ( DefaultHealthCheckStartPeriod = "0s" // DefaultHealthCheckTimeout default value DefaultHealthCheckTimeout = "30s" + // DefaultHealthMaxLogCount default value + DefaultHealthMaxLogCount uint = 5 + // DefaultHealthMaxLogSize default value + DefaultHealthMaxLogSize uint = 500 + // DefaultHealthCheckLocalDestination default value + DefaultHealthCheckLocalDestination string = "local" ) +const HealthCheckEventsLoggerDestination string = "events_logger" + // HealthConfig.Test options const ( // HealthConfigTestNone disables healthcheck diff --git a/vendor/github.com/containers/podman/v5/libpod/events.go b/vendor/github.com/containers/podman/v5/libpod/events.go index 92af63632..daaacdd86 100644 --- a/vendor/github.com/containers/podman/v5/libpod/events.go +++ b/vendor/github.com/containers/podman/v5/libpod/events.go @@ -6,8 +6,8 @@ import ( "context" "fmt" "path/filepath" - "sync" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/libpod/events" "github.com/sirupsen/logrus" ) @@ -28,27 +28,37 @@ func (r *Runtime) newEventer() (events.Eventer, error) { // newContainerEvent creates a new event based on a container func (c *Container) newContainerEvent(status events.Status) { - if err := c.newContainerEventWithInspectData(status, "", false); err != nil { + if err := c.newContainerEventWithInspectData(status, define.HealthCheckResults{}, false); err != nil { logrus.Errorf("Unable to write container event: %v", err) } } // newContainerHealthCheckEvent creates a new healthcheck event with the given status -func (c *Container) newContainerHealthCheckEvent(healthStatus string) { - if err := c.newContainerEventWithInspectData(events.HealthStatus, healthStatus, false); err != nil { +func (c *Container) newContainerHealthCheckEvent(healthCheckResult define.HealthCheckResults) { + if err := c.newContainerEventWithInspectData(events.HealthStatus, healthCheckResult, false); err != nil { logrus.Errorf("Unable to write container event: %v", err) } } // newContainerEventWithInspectData creates a new event and sets the // ContainerInspectData field if inspectData is set. 
-func (c *Container) newContainerEventWithInspectData(status events.Status, healthStatus string, inspectData bool) error { +func (c *Container) newContainerEventWithInspectData(status events.Status, healthCheckResult define.HealthCheckResults, inspectData bool) error { e := events.NewEvent(status) e.ID = c.ID() e.Name = c.Name() e.Image = c.config.RootfsImageName e.Type = events.Container - e.HealthStatus = healthStatus + e.HealthStatus = healthCheckResult.Status + if c.config.HealthLogDestination == define.HealthCheckEventsLoggerDestination { + if len(healthCheckResult.Log) > 0 { + logData, err := json.Marshal(healthCheckResult.Log[len(healthCheckResult.Log)-1]) + if err != nil { + return fmt.Errorf("unable to marshall healthcheck log for writing: %w", err) + } + e.HealthLog = string(logData) + } + } + e.HealthFailingStreak = healthCheckResult.FailingStreak e.Details = events.Details{ PodID: c.PodID(), @@ -176,7 +186,7 @@ func (r *Runtime) Events(ctx context.Context, options events.ReadOptions) error // GetEvents reads the event log and returns events based on input filters func (r *Runtime) GetEvents(ctx context.Context, filters []string) ([]*events.Event, error) { - eventChannel := make(chan *events.Event) + eventChannel := make(chan events.ReadResult) options := events.ReadOptions{ EventChannel: eventChannel, Filters: filters, @@ -184,45 +194,21 @@ func (r *Runtime) GetEvents(ctx context.Context, filters []string) ([]*events.Ev Stream: false, } - logEvents := make([]*events.Event, 0, len(eventChannel)) - readLock := sync.Mutex{} - readLock.Lock() - go func() { - for e := range eventChannel { - logEvents = append(logEvents, e) - } - readLock.Unlock() - }() - - readErr := r.eventer.Read(ctx, options) - readLock.Lock() // Wait for the events to be consumed. - return logEvents, readErr -} - -// GetLastContainerEvent takes a container name or ID and an event status and returns -// the last occurrence of the container event -func (r *Runtime) GetLastContainerEvent(ctx context.Context, nameOrID string, containerEvent events.Status) (*events.Event, error) { - // FIXME: events should be read in reverse order! 
- // https://github.com/containers/podman/issues/14579 - - // check to make sure the event.Status is valid - if _, err := events.StringToStatus(containerEvent.String()); err != nil { - return nil, err - } - filters := []string{ - fmt.Sprintf("container=%s", nameOrID), - fmt.Sprintf("event=%s", containerEvent), - "type=container", - } - containerEvents, err := r.GetEvents(ctx, filters) + err := r.eventer.Read(ctx, options) if err != nil { return nil, err } - if len(containerEvents) < 1 { - return nil, fmt.Errorf("%s not found: %w", containerEvent.String(), events.ErrEventNotFound) + + logEvents := make([]*events.Event, 0, len(eventChannel)) + for evt := range eventChannel { + // we ignore any error here, this is only used on the backup + // GetExecDiedEvent() died path as best effort anyway + if evt.Error == nil { + logEvents = append(logEvents, evt.Event) + } } - // return the last element in the slice - return containerEvents[len(containerEvents)-1], nil + + return logEvents, nil } // GetExecDiedEvent takes a container name or ID, exec session ID, and returns diff --git a/vendor/github.com/containers/podman/v5/libpod/events/config.go b/vendor/github.com/containers/podman/v5/libpod/events/config.go index d0ab5d45f..6337e4c15 100644 --- a/vendor/github.com/containers/podman/v5/libpod/events/config.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/config.go @@ -16,8 +16,6 @@ const ( Journald EventerType = iota // Null is a no-op events logger. It does not read or write events. Null EventerType = iota - // Memory indicates the event logger will hold events in memory - Memory EventerType = iota ) // Event describes the attributes of a libpod event @@ -41,6 +39,10 @@ type Event struct { Type Type // Health status of the current container HealthStatus string `json:"health_status,omitempty"` + // Healthcheck log of the current container + HealthLog string `json:"health_log,omitempty"` + // HealthFailingStreak log of the current container + HealthFailingStreak int `json:"health_failing_streak,omitempty"` // Error code for certain events involving errors. 
Error string `json:"error,omitempty"` @@ -83,10 +85,15 @@ type Eventer interface { String() string } +type ReadResult struct { + Event *Event + Error error +} + // ReadOptions describe the attributes needed to read event logs type ReadOptions struct { // EventChannel is the comm path back to user - EventChannel chan *Event + EventChannel chan ReadResult // Filters are key/value pairs that describe to limit output Filters []string // FromStart means you start reading from the start of the logs diff --git a/vendor/github.com/containers/podman/v5/libpod/events/events.go b/vendor/github.com/containers/podman/v5/libpod/events/events.go index 5eda0033c..9f9b6f6f8 100644 --- a/vendor/github.com/containers/podman/v5/libpod/events/events.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/events.go @@ -20,8 +20,6 @@ func (et EventerType) String() string { return "file" case Journald: return "journald" - case Memory: - return "memory" case Null: return "none" default: @@ -36,8 +34,6 @@ func IsValidEventer(eventer string) bool { return true case Journald.String(): return true - case Memory.String(): - return true case Null.String(): return true default: @@ -76,8 +72,10 @@ func (e *Event) ToHumanReadable(truncate bool) string { if e.PodID != "" { humanFormat += fmt.Sprintf(", pod_id=%s", e.PodID) } - if e.HealthStatus != "" { + if e.Status == HealthStatus { humanFormat += fmt.Sprintf(", health_status=%s", e.HealthStatus) + humanFormat += fmt.Sprintf(", health_failing_streak=%d", e.HealthFailingStreak) + humanFormat += fmt.Sprintf(", health_log=%s", e.HealthLog) } // check if the container has labels and add it to the output if len(e.Attributes) > 0 { diff --git a/vendor/github.com/containers/podman/v5/libpod/events/events_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/events/events_freebsd.go index 90933fa2c..116398d06 100644 --- a/vendor/github.com/containers/podman/v5/libpod/events/events_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/events_freebsd.go @@ -15,8 +15,6 @@ func NewEventer(options EventerOptions) (Eventer, error) { return EventLogFile{options}, nil case strings.ToUpper(Null.String()): return newNullEventer(), nil - case strings.ToUpper(Memory.String()): - return NewMemoryEventer(), nil default: return nil, fmt.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType)) } diff --git a/vendor/github.com/containers/podman/v5/libpod/events/events_linux.go b/vendor/github.com/containers/podman/v5/libpod/events/events_linux.go index 66b125dd5..f17f9708a 100644 --- a/vendor/github.com/containers/podman/v5/libpod/events/events_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/events_linux.go @@ -21,8 +21,6 @@ func NewEventer(options EventerOptions) (Eventer, error) { return newLogFileEventer(options) case strings.ToUpper(Null.String()): return newNullEventer(), nil - case strings.ToUpper(Memory.String()): - return NewMemoryEventer(), nil default: return nil, fmt.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType)) } diff --git a/vendor/github.com/containers/podman/v5/libpod/events/journal_linux.go b/vendor/github.com/containers/podman/v5/libpod/events/journal_linux.go index 2ee94090f..823250364 100644 --- a/vendor/github.com/containers/podman/v5/libpod/events/journal_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/journal_linux.go @@ -65,8 +65,13 @@ func (e EventJournalD) Write(ee Event) error { } m["PODMAN_LABELS"] = string(b) } - m["PODMAN_HEALTH_STATUS"] = 
ee.HealthStatus - + if ee.Status == HealthStatus { + m["PODMAN_HEALTH_STATUS"] = ee.HealthStatus + if ee.HealthLog != "" { + m["PODMAN_HEALTH_LOG"] = ee.HealthLog + } + m["PODMAN_HEALTH_FAILING_STREAK"] = strconv.Itoa(ee.HealthFailingStreak) + } if len(ee.Details.ContainerInspectData) > 0 { m["PODMAN_CONTAINER_INSPECT_DATA"] = ee.Details.ContainerInspectData } @@ -92,8 +97,7 @@ func (e EventJournalD) Write(ee Event) error { } // Read reads events from the journal and sends qualified events to the event channel -func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { - defer close(options.EventChannel) +func (e EventJournalD) Read(ctx context.Context, options ReadOptions) (retErr error) { filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) if err != nil { return fmt.Errorf("failed to parse event filters: %w", err) @@ -112,13 +116,15 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { return err } defer func() { - if err := j.Close(); err != nil { - logrus.Errorf("Unable to close journal :%v", err) + if retErr != nil { + if err := j.Close(); err != nil { + logrus.Errorf("Unable to close journal :%v", err) + } } }() err = j.SetDataThreshold(0) if err != nil { - logrus.Warnf("cannot set data threshold: %v", err) + return fmt.Errorf("cannot set data threshold for journal: %v", err) } // match only podman journal entries podmanJournal := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"} @@ -153,30 +159,40 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { } } - for { - entry, err := GetNextEntry(ctx, j, options.Stream, untilTime) - if err != nil { - return err - } - // no entry == we hit the end - if entry == nil { - return nil - } + go func() { + defer close(options.EventChannel) + defer func() { + if err := j.Close(); err != nil { + logrus.Errorf("Unable to close journal :%v", err) + } + }() + for { + entry, err := GetNextEntry(ctx, j, options.Stream, untilTime) + if err != nil { + options.EventChannel <- ReadResult{Error: err} + break + } + // no entry == we hit the end + if entry == nil { + break + } - newEvent, err := newEventFromJournalEntry(entry) - if err != nil { - // We can't decode this event. - // Don't fail hard - that would make events unusable. - // Instead, log and continue. - if !errors.Is(err, ErrEventTypeBlank) { - logrus.Errorf("Unable to decode event: %v", err) + newEvent, err := newEventFromJournalEntry(entry) + if err != nil { + // We can't decode this event. + // Don't fail hard - that would make events unusable. + // Instead, log and continue. 
+ if !errors.Is(err, ErrEventTypeBlank) { + options.EventChannel <- ReadResult{Error: fmt.Errorf("unable to decode event: %v", err)} + } + continue + } + if applyFilters(newEvent, filterMap) { + options.EventChannel <- ReadResult{Event: newEvent} } - continue - } - if applyFilters(newEvent, filterMap) { - options.EventChannel <- newEvent } - } + }() + return nil } func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { @@ -225,6 +241,15 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { } } newEvent.HealthStatus = entry.Fields["PODMAN_HEALTH_STATUS"] + if log, ok := entry.Fields["PODMAN_HEALTH_LOG"]; ok { + newEvent.HealthLog = log + } + if FailingStreak, ok := entry.Fields["PODMAN_HEALTH_FAILING_STREAK"]; ok { + FailingStreakInt, err := strconv.Atoi(FailingStreak) + if err == nil { + newEvent.HealthFailingStreak = FailingStreakInt + } + } newEvent.Details.ContainerInspectData = entry.Fields["PODMAN_CONTAINER_INSPECT_DATA"] case Network: newEvent.ID = entry.Fields["PODMAN_ID"] diff --git a/vendor/github.com/containers/podman/v5/libpod/events/logfile.go b/vendor/github.com/containers/podman/v5/libpod/events/logfile.go index 9b69d15f6..4c0dd5e22 100644 --- a/vendor/github.com/containers/podman/v5/libpod/events/logfile.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/logfile.go @@ -108,7 +108,6 @@ func (e EventLogFile) readRotateEvent(event *Event) (begin bool, end bool, err e // Reads from the log file func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { - defer close(options.EventChannel) filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) if err != nil { return fmt.Errorf("failed to parse event filters: %w", err) @@ -148,56 +147,65 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { return err } - var line *tail.Line - var ok bool - var skipRotate bool - for { - select { - case <-ctx.Done(): - // the consumer has cancelled - t.Kill(errors.New("hangup by client")) - return nil - case line, ok = <-t.Lines: - if !ok { - // channel was closed - return nil + go func() { + defer close(options.EventChannel) + var line *tail.Line + var ok bool + var skipRotate bool + for { + select { + case <-ctx.Done(): + // the consumer has cancelled + t.Kill(errors.New("hangup by client")) + return + case line, ok = <-t.Lines: + if !ok { + // channel was closed + return + } + // fallthrough } - // fallthrough - } - event, err := newEventFromJSONString(line.Text) - if err != nil { - return err - } - switch event.Type { - case Image, Volume, Pod, Container, Network: - // no-op - case System: - begin, end, err := e.readRotateEvent(event) + event, err := newEventFromJSONString(line.Text) if err != nil { - return err + err := fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) + options.EventChannel <- ReadResult{Error: err} + continue } - if begin && event.Time.After(readTime) { - // If the rotation event happened _after_ we - // started reading, we need to ignore/skip - // subsequent event until the end of the - // rotation. 
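Both eventer Read implementations in the hunks above now return immediately and stream ReadResult values from a goroutine that owns, and is solely responsible for closing, the event channel. A consumer then simply ranges until the channel closes, splitting events from per-entry errors. A reduced sketch of that contract (types trimmed to essentials, values invented):

package main

import "fmt"

type event struct{ Status string }

type readResult struct {
	Event *event
	Error error
}

// read models the new Eventer.Read contract: kick off a producer goroutine,
// return immediately, and let the producer close the channel when done.
func read(ch chan<- readResult) {
	go func() {
		defer close(ch) // the producer owns the channel lifetime
		ch <- readResult{Event: &event{Status: "health_status"}}
		ch <- readResult{Error: fmt.Errorf("unable to decode event")}
		ch <- readResult{Event: &event{Status: "died"}}
	}()
}

func main() {
	ch := make(chan readResult)
	read(ch)
	var events []*event
	for res := range ch {
		if res.Error != nil {
			fmt.Println("skipping:", res.Error) // best effort, as in GetEvents
			continue
		}
		events = append(events, res.Event)
	}
	fmt.Println("collected", len(events), "events")
}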
- skipRotate = true - logrus.Debugf("Skipping already read events after log-file rotation: %v", event) - } else if end { - // This rotate event - skipRotate = false + switch event.Type { + case Image, Volume, Pod, Container, Network: + // no-op + case System: + begin, end, err := e.readRotateEvent(event) + if err != nil { + options.EventChannel <- ReadResult{Error: err} + continue + } + if begin && event.Time.After(readTime) { + // If the rotation event happened _after_ we + // started reading, we need to ignore/skip + // subsequent event until the end of the + // rotation. + skipRotate = true + logrus.Debugf("Skipping already read events after log-file rotation: %v", event) + } else if end { + // This rotate event + skipRotate = false + } + default: + err := fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) + options.EventChannel <- ReadResult{Error: err} + continue + } + if skipRotate { + continue + } + if applyFilters(event, filterMap) { + options.EventChannel <- ReadResult{Event: event} } - default: - return fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) - } - if skipRotate { - continue - } - if applyFilters(event, filterMap) { - options.EventChannel <- event } - } + }() + return nil } // String returns a string representation of the logger diff --git a/vendor/github.com/containers/podman/v5/libpod/events/memory.go b/vendor/github.com/containers/podman/v5/libpod/events/memory.go deleted file mode 100644 index b3e03d86b..000000000 --- a/vendor/github.com/containers/podman/v5/libpod/events/memory.go +++ /dev/null @@ -1,49 +0,0 @@ -package events - -import ( - "context" -) - -// EventMemory is the structure for event writing to a channel. It contains the eventer -// options and the event itself. Methods for reading and writing are also defined from it. 
-type EventMemory struct { - options EventerOptions - elements chan *Event -} - -// Write event to memory queue -func (e EventMemory) Write(event Event) (err error) { - e.elements <- &event - return -} - -// Read event(s) from memory queue -func (e EventMemory) Read(ctx context.Context, options ReadOptions) (err error) { - select { - case <-ctx.Done(): - return - default: - } - - select { - case event := <-e.elements: - options.EventChannel <- event - default: - } - return nil -} - -// String returns eventer type -func (e EventMemory) String() string { - return e.options.EventerType -} - -// NewMemoryEventer returns configured MemoryEventer -func NewMemoryEventer() Eventer { - return EventMemory{ - options: EventerOptions{ - EventerType: Memory.String(), - }, - elements: make(chan *Event, 100), - } -} diff --git a/vendor/github.com/containers/podman/v5/libpod/healthcheck.go b/vendor/github.com/containers/podman/v5/libpod/healthcheck.go index 95f20f2fd..ada4e004d 100644 --- a/vendor/github.com/containers/podman/v5/libpod/healthcheck.go +++ b/vendor/github.com/containers/podman/v5/libpod/healthcheck.go @@ -15,18 +15,11 @@ import ( "time" "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/shutdown" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) -const ( - // MaxHealthCheckNumberLogs is the maximum number of attempts we keep - // in the healthcheck history file - MaxHealthCheckNumberLogs int = 5 - // MaxHealthCheckLogLength in characters - MaxHealthCheckLogLength = 500 -) - // HealthCheck verifies the state and validity of the healthcheck configuration // on the container and then executes the healthcheck func (r *Runtime) HealthCheck(ctx context.Context, name string) (define.HealthCheckStatus, error) { @@ -143,8 +136,8 @@ func (c *Container) runHealthCheck(ctx context.Context, isStartup bool) (define. } eventLog := output.String() - if len(eventLog) > MaxHealthCheckLogLength { - eventLog = eventLog[:MaxHealthCheckLogLength] + if c.config.HealthMaxLogSize != 0 && len(eventLog) > int(c.config.HealthMaxLogSize) { + eventLog = eventLog[:c.config.HealthMaxLogSize] } if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout { @@ -154,21 +147,22 @@ func (c *Container) runHealthCheck(ctx context.Context, isStartup bool) (define. } hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog) - logStatus, err := c.updateHealthCheckLog(hcl, inStartPeriod, isStartup) + + healthCheckResult, err := c.updateHealthCheckLog(hcl, inStartPeriod, isStartup) if err != nil { - return hcResult, "", fmt.Errorf("unable to update health check log %s for %s: %w", c.healthCheckLogPath(), c.ID(), err) + return hcResult, "", fmt.Errorf("unable to update health check log %s for %s: %w", c.config.HealthLogDestination, c.ID(), err) } // Write HC event with appropriate status as the last thing before we // return. if hcResult == define.HealthCheckNotDefined || hcResult == define.HealthCheckInternalError { - return hcResult, logStatus, hcErr + return hcResult, healthCheckResult.Status, hcErr } if c.runtime.config.Engine.HealthcheckEvents { - c.newContainerHealthCheckEvent(logStatus) + c.newContainerHealthCheckEvent(healthCheckResult) } - return hcResult, logStatus, hcErr + return hcResult, healthCheckResult.Status, hcErr } func (c *Container) processHealthCheckStatus(status string) error { @@ -280,6 +274,13 @@ func (c *Container) incrementStartupHCSuccessCounter(ctx context.Context) { // Which happens to be us. 
// So this has to be last - after this, systemd serves us a // SIGTERM and we exit. + // Special case, via SIGTERM we exit(1) which means systemd logs a failure in the unit. + // We do not want this as the unit will be leaked on failure states unless "reset-failed" + // is called. Fundamentally this is expected so switch it to exit 0. + // NOTE: This is only safe while being called from "podman healthcheck run" which we know + // is the case here as we should not alter the exit code of another process that just + // happened to call this. + shutdown.SetExitCode(0) if err := c.removeTransientFiles(ctx, true, oldUnit); err != nil { logrus.Errorf("Error removing container %s healthcheck: %v", c.ID(), err) return @@ -340,16 +341,12 @@ func newHealthCheckLog(start, end time.Time, exitCode int, log string) define.He // updateHealthStatus updates the health status of the container // in the healthcheck log func (c *Container) updateHealthStatus(status string) error { - healthCheck, err := c.getHealthCheckLog() + healthCheck, err := c.readHealthCheckLog() if err != nil { return err } healthCheck.Status = status - newResults, err := json.Marshal(healthCheck) - if err != nil { - return fmt.Errorf("unable to marshall healthchecks for writing status: %w", err) - } - return os.WriteFile(c.healthCheckLogPath(), newResults, 0700) + return c.writeHealthCheckLog(healthCheck) } // isUnhealthy returns true if the current health check status is unhealthy. @@ -357,7 +354,7 @@ func (c *Container) isUnhealthy() (bool, error) { if !c.HasHealthCheck() { return false, nil } - healthCheck, err := c.getHealthCheckLog() + healthCheck, err := c.readHealthCheckLog() if err != nil { return false, err } @@ -365,7 +362,7 @@ func (c *Container) isUnhealthy() (bool, error) { } // UpdateHealthCheckLog parses the health check results and writes the log -func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPeriod, isStartup bool) (string, error) { +func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPeriod, isStartup bool) (define.HealthCheckResults, error) { c.lock.Lock() defer c.lock.Unlock() @@ -373,12 +370,12 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio // both failing and succeeding cases to match kube behavior. 
// So don't update the health check log till the start period is over if _, ok := c.config.Spec.Annotations[define.KubeHealthCheckAnnotation]; ok && inStartPeriod && !isStartup { - return "", nil + return define.HealthCheckResults{}, nil } - healthCheck, err := c.getHealthCheckLog() + healthCheck, err := c.readHealthCheckLog() if err != nil { - return "", err + return define.HealthCheckResults{}, err } if hcl.ExitCode == 0 { // set status to healthy, reset failing state to 0 @@ -398,28 +395,48 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio } } healthCheck.Log = append(healthCheck.Log, hcl) - if len(healthCheck.Log) > MaxHealthCheckNumberLogs { + if c.config.HealthMaxLogCount != 0 && len(healthCheck.Log) > int(c.config.HealthMaxLogCount) { healthCheck.Log = healthCheck.Log[1:] } - newResults, err := json.Marshal(healthCheck) + return healthCheck, c.writeHealthCheckLog(healthCheck) +} + +func (c *Container) witeToFileHealthCheckResults(path string, result define.HealthCheckResults) error { + newResults, err := json.Marshal(result) if err != nil { - return "", fmt.Errorf("unable to marshall healthchecks for writing: %w", err) + return fmt.Errorf("unable to marshall healthchecks for writing: %w", err) + } + return os.WriteFile(path, newResults, 0700) +} + +func (c *Container) getHealthCheckLogDestination() string { + var destination string + switch c.config.HealthLogDestination { + case define.DefaultHealthCheckLocalDestination, define.HealthCheckEventsLoggerDestination, "": + destination = filepath.Join(filepath.Dir(c.state.RunDir), "healthcheck.log") + default: + destination = filepath.Join(c.config.HealthLogDestination, c.ID()+"-healthcheck.log") } - return healthCheck.Status, os.WriteFile(c.healthCheckLogPath(), newResults, 0700) + return destination +} + +func (c *Container) writeHealthCheckLog(result define.HealthCheckResults) error { + return c.witeToFileHealthCheckResults(c.getHealthCheckLogDestination(), result) } -// HealthCheckLogPath returns the path for where the health check log is -func (c *Container) healthCheckLogPath() string { - return filepath.Join(filepath.Dir(c.state.RunDir), "healthcheck.log") +// readHealthCheckLog read HealthCheck logs from the path or events_logger +// The caller should lock the container before this function is called. +func (c *Container) readHealthCheckLog() (define.HealthCheckResults, error) { + return c.readFromFileHealthCheckLog(c.getHealthCheckLogDestination()) } -// getHealthCheckLog returns HealthCheck results by reading the container's +// readFromFileHealthCheckLog returns HealthCheck results by reading the container's // health check log file. If the health check log file does not exist, then // an empty healthcheck struct is returned // The caller should lock the container before this function is called. -func (c *Container) getHealthCheckLog() (define.HealthCheckResults, error) { +func (c *Container) readFromFileHealthCheckLog(path string) (define.HealthCheckResults, error) { var healthCheck define.HealthCheckResults - b, err := os.ReadFile(c.healthCheckLogPath()) + b, err := os.ReadFile(path) if err != nil { if errors.Is(err, fs.ErrNotExist) { // If the file does not exists just return empty healthcheck and no error. 
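The destination resolution in getHealthCheckLogDestination above can be restated compactly: the "local" and "events_logger" destinations (and the unset default) keep the log next to the container's run directory, while anything else is treated as a directory that receives a per-container file. A sketch with the constant values assumed from the define package:

package main

import (
	"fmt"
	"path/filepath"
)

const (
	destLocal  = "local"         // assumed: define.DefaultHealthCheckLocalDestination
	destEvents = "events_logger" // assumed: define.HealthCheckEventsLoggerDestination
)

func healthLogPath(dest, runDir, ctrID string) string {
	switch dest {
	case destLocal, destEvents, "":
		return filepath.Join(filepath.Dir(runDir), "healthcheck.log")
	default:
		return filepath.Join(dest, ctrID+"-healthcheck.log")
	}
}

func main() {
	fmt.Println(healthLogPath("", "/run/containers/ctr123/rundir", "ctr123"))
	fmt.Println(healthLogPath("/var/log/hc", "/run/containers/ctr123/rundir", "ctr123"))
}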
@@ -428,7 +445,7 @@ func (c *Container) getHealthCheckLog() (define.HealthCheckResults, error) { return healthCheck, fmt.Errorf("failed to read health check log file: %w", err) } if err := json.Unmarshal(b, &healthCheck); err != nil { - return healthCheck, fmt.Errorf("failed to unmarshal existing healthcheck results in %s: %w", c.healthCheckLogPath(), err) + return healthCheck, fmt.Errorf("failed to unmarshal existing healthcheck results in %s: %w", path, err) } return healthCheck, nil } @@ -454,7 +471,7 @@ func (c *Container) healthCheckStatus() (string, error) { return "", err } - results, err := c.getHealthCheckLog() + results, err := c.readHealthCheckLog() if err != nil { return "", fmt.Errorf("unable to get healthcheck log for %s: %w", c.ID(), err) } diff --git a/vendor/github.com/containers/podman/v5/libpod/healthcheck_linux.go b/vendor/github.com/containers/podman/v5/libpod/healthcheck_linux.go index 344c4a202..e751c4e00 100644 --- a/vendor/github.com/containers/podman/v5/libpod/healthcheck_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/healthcheck_linux.go @@ -137,13 +137,8 @@ func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool, un stopErrors = append(stopErrors, fmt.Errorf("stopping systemd health-check timer %q: %w", timerFile, err)) } - // Reset the service before stopping it to make sure it's being removed - // on stop. serviceChan := make(chan string) serviceFile := fmt.Sprintf("%s.service", unitName) - if err := conn.ResetFailedUnitContext(ctx, serviceFile); err != nil { - logrus.Debugf("Failed to reset unit file: %q", err) - } if _, err := conn.StopUnitContext(ctx, serviceFile, "ignore-dependencies", serviceChan); err != nil { if !strings.HasSuffix(err.Error(), ".service not loaded.") { stopErrors = append(stopErrors, fmt.Errorf("removing health-check service %q: %w", serviceFile, err)) @@ -151,6 +146,12 @@ func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool, un } else if err := systemdOpSuccessful(serviceChan); err != nil { stopErrors = append(stopErrors, fmt.Errorf("stopping systemd health-check service %q: %w", serviceFile, err)) } + // Reset the service after stopping it to make sure it's being removed, systemd keep failed transient services + // around in its state. We do not care about the error and we need to ensure to reset the state so we do not + // leak resources forever. 
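The stop-then-reset ordering this hunk adopts (the ResetFailedUnitContext call follows right below) can be sketched with go-systemd directly. A hedged, stand-alone version; NewSystemConnectionContext stands in for podman's own connection helper, which also handles rootless session buses, and the unit name is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/coreos/go-systemd/v22/dbus"
)

func stopTransientUnit(ctx context.Context, unit string) error {
	conn, err := dbus.NewSystemConnectionContext(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	ch := make(chan string)
	if _, err := conn.StopUnitContext(ctx, unit, "ignore-dependencies", ch); err != nil {
		return fmt.Errorf("stopping %s: %w", unit, err)
	}
	if res := <-ch; res != "done" {
		return fmt.Errorf("stop job for %s finished with %q", unit, res)
	}
	// Reset *after* stopping: systemd keeps failed transient units in its
	// state, so clear them to avoid leaking units on failure paths. The
	// error is best effort, matching the hunk below.
	if err := conn.ResetFailedUnitContext(ctx, unit); err != nil {
		fmt.Println("reset-failed (ignored):", err)
	}
	return nil
}

func main() {
	_ = stopTransientUnit(context.Background(), "example-healthcheck.service")
}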
+ if err := conn.ResetFailedUnitContext(ctx, serviceFile); err != nil { + logrus.Debugf("Failed to reset unit file: %q", err) + } return errorhandling.JoinErrors(stopErrors) } diff --git a/vendor/github.com/containers/podman/v5/libpod/info.go b/vendor/github.com/containers/podman/v5/libpod/info.go index 24e2fd910..81b3e8a01 100644 --- a/vendor/github.com/containers/podman/v5/libpod/info.go +++ b/vendor/github.com/containers/podman/v5/libpod/info.go @@ -229,7 +229,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) { var grStats syscall.Statfs_t if err := syscall.Statfs(r.store.GraphRoot(), &grStats); err != nil { - return nil, fmt.Errorf("unable to collect graph root usasge for %q: %w", r.store.GraphRoot(), err) + return nil, fmt.Errorf("unable to collect graph root usage for %q: %w", r.store.GraphRoot(), err) } allocated := uint64(grStats.Bsize) * grStats.Blocks info := define.StoreInfo{ diff --git a/vendor/github.com/containers/podman/v5/libpod/kube.go b/vendor/github.com/containers/podman/v5/libpod/kube.go index b376f9ef8..7964109d5 100644 --- a/vendor/github.com/containers/podman/v5/libpod/kube.go +++ b/vendor/github.com/containers/podman/v5/libpod/kube.go @@ -233,6 +233,61 @@ func GenerateForKubeDeployment(ctx context.Context, pod *YAMLPod, options entiti return &dep, nil } +// GenerateForKubeJob returns a YAMLDeployment from a YAMLPod that is then used to create a kubernetes Job +// kind YAML. +func GenerateForKubeJob(ctx context.Context, pod *YAMLPod, options entities.GenerateKubeOptions) (*YAMLJob, error) { + // Restart policy for Job cannot be set to Always + if options.Type == define.K8sKindJob && pod.Spec.RestartPolicy == v1.RestartPolicyAlways { + return nil, fmt.Errorf("k8s Jobs can not have restartPolicy set to Always; only Never and OnFailure policies allowed") + } + + // Create label map that will be added to podSpec and Job metadata + // The matching label lets the job know which pods to manage + appKey := "app" + matchLabels := map[string]string{appKey: pod.Name} + // Add the key:value (app:pod-name) to the podSpec labels + if pod.Labels == nil { + pod.Labels = matchLabels + } else { + pod.Labels[appKey] = pod.Name + } + + jobSpec := YAMLJobSpec{ + Template: &YAMLPodTemplateSpec{ + PodTemplateSpec: v1.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + }, + Spec: pod.Spec, + }, + } + + // Set the completions and parallelism to 1 by default for the Job + completions, parallelism := int32(1), int32(1) + jobSpec.Completions = &completions + jobSpec.Parallelism = ¶llelism + // Set the restart policy to never as k8s requires a job to have a restart policy + // of onFailure or never set in the kube yaml + jobSpec.Template.Spec.RestartPolicy = v1.RestartPolicyNever + + // Create the Deployment object + job := YAMLJob{ + Job: v1.Job{ + ObjectMeta: v12.ObjectMeta{ + Name: pod.Name + "-job", + CreationTimestamp: pod.CreationTimestamp, + Labels: pod.Labels, + }, + TypeMeta: v12.TypeMeta{ + Kind: "Job", + APIVersion: "batch/v1", + }, + }, + Spec: &jobSpec, + } + + return &job, nil +} + // GenerateForKube generates a v1.PersistentVolumeClaim from a libpod volume. 
// GenerateForKube generates a v1.PersistentVolumeClaim from a libpod volume. func (v *Volume) GenerateForKube() *v1.PersistentVolumeClaim { annotations := make(map[string]string) @@ -328,6 +383,15 @@ type YAMLDaemonSetSpec struct { Strategy *v1.DaemonSetUpdateStrategy `json:"strategy,omitempty"` } +// YAMLJobSpec represents the same k8s API core JobSpec with a small +// change, and that is having Template as a pointer to YAMLPodTemplateSpec, +// because Go doesn't omit empty structs and we want to omit any empty +// fields in the Pod YAML. +type YAMLJobSpec struct { + v1.JobSpec + Template *YAMLPodTemplateSpec `json:"template,omitempty"` +} + // YAMLDaemonSet represents the same k8s API core DaemonSet with a small change // and that is having Spec as a pointer to YAMLDaemonSetSpec and Status as a pointer to // k8s API core DaemonSetStatus. @@ -350,6 +414,12 @@ type YAMLDeployment struct { Status *v1.DeploymentStatus `json:"status,omitempty"` } +type YAMLJob struct { + v1.Job + Spec *YAMLJobSpec `json:"spec,omitempty"` + Status *v1.JobStatus `json:"status,omitempty"` +} + // YAMLService represents the same k8s API core Service struct with a small // change and that is having Status as a pointer to k8s API core ServiceStatus. // Because Go doesn't omit empty struct and we want to omit Status in YAML @@ -507,12 +577,42 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po stopTimeout *uint ) + cfg, err := config.Default() + if err != nil { + return nil, err + } + // Let's sort the containers in order of created time // This will ensure that the init containers are defined in the correct order in the kube yaml sort.Slice(containers, func(i, j int) bool { return containers[i].CreatedTime().Before(containers[j].CreatedTime()) }) for _, ctr := range containers { - if !ctr.IsInfra() { + if ctr.IsInfra() { + // If there is a user namespace for the infra container, then register it for the entire pod.
+ if v, found := ctr.config.Spec.Annotations[define.UserNsAnnotation]; found { + podAnnotations[define.UserNsAnnotation] = v + } + _, _, infraDNS, _, err := containerToV1Container(ctx, ctr, getService) + if err != nil { + return nil, err + } + if infraDNS != nil { + if servers := infraDNS.Nameservers; len(servers) > 0 { + dnsInfo.Nameservers = servers + } + if searches := infraDNS.Searches; len(searches) > 0 { + dnsInfo.Searches = searches + } + if options := infraDNS.Options; len(options) > 0 { + dnsInfo.Options = options + } + } + // If the infraName is not the podID-infra, that means the user set another infra name using + // --infra-name during pod creation + if infraName != "" && infraName != p.ID()[:12]+"-infra" { + podAnnotations[define.InfraNameAnnotation] = infraName + } + } else { for k, v := range ctr.config.Spec.Annotations { if !podmanOnly && (define.IsReservedAnnotation(k)) { continue @@ -535,7 +635,7 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po // Pick the first container that has a stop-timeout set and use that value // Ignore podman's default - if ctr.config.StopTimeout != util.DefaultContainerConfig().Engine.StopTimeout && stopTimeout == nil { + if ctr.config.StopTimeout != cfg.Engine.StopTimeout && stopTimeout == nil { stopTimeout = &ctr.config.StopTimeout } @@ -571,30 +671,8 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po // Deduplicate volumes, so if containers in the pod share a volume, it's only // listed in the volumes section once for _, vol := range volumes { - vol := vol deDupPodVolumes[vol.Name] = &vol } - } else { - _, _, infraDNS, _, err := containerToV1Container(ctx, ctr, getService) - if err != nil { - return nil, err - } - if infraDNS != nil { - if servers := infraDNS.Nameservers; len(servers) > 0 { - dnsInfo.Nameservers = servers - } - if searches := infraDNS.Searches; len(searches) > 0 { - dnsInfo.Searches = searches - } - if options := infraDNS.Options; len(options) > 0 { - dnsInfo.Options = options - } - } - // If the infraName is not the podID-infra, that means the user set another infra name using - // --infra-name during pod creation - if infraName != "" && infraName != p.ID()[:12]+"-infra" { - podAnnotations[define.InfraNameAnnotation] = infraName - } } } podVolumes := []v1.Volume{} @@ -663,6 +741,11 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta // simplePodWithV1Containers is a function used by inspect when kube yaml needs to be generated // for a single container. we "insert" that container description in a pod. 
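podWithContainers() above and simplePodWithV1Containers() below both switch from calling util.DefaultContainerConfig() inside the loop to loading the defaults once via config.Default(). A minimal sketch of the pattern, assuming containers/common is on the module path:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Load the defaults once and reuse them for every container in the
	// loop, rather than re-resolving them per iteration.
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	fmt.Println("default stop timeout:", cfg.Engine.StopTimeout)
}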
func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getService, podmanOnly bool) (*v1.Pod, error) { + cfg, err := config.Default() + if err != nil { + return nil, err + } + kubeCtrs := make([]v1.Container, 0, len(ctrs)) kubeInitCtrs := []v1.Container{} kubeVolumes := make([]v1.Volume, 0) @@ -702,7 +785,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic // Pick the first container that has a stop-timeout set and use that value // Ignore podman's default - if ctr.config.StopTimeout != util.DefaultContainerConfig().Engine.StopTimeout && stopTimeout == nil { + if ctr.config.StopTimeout != cfg.Engine.StopTimeout && stopTimeout == nil { stopTimeout = &ctr.config.StopTimeout } @@ -713,7 +796,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic if ctr.config.Spec.Process != nil { var ulimitArr []string - defaultUlimits := util.DefaultContainerConfig().Ulimits() + defaultUlimits := cfg.Ulimits() for _, ulimit := range ctr.config.Spec.Process.Rlimits { finalUlimit := strings.ToLower(strings.ReplaceAll(ulimit.Type, "RLIMIT_", "")) + "=" + strconv.Itoa(int(ulimit.Soft)) + ":" + strconv.Itoa(int(ulimit.Hard)) // compare ulimit with default list so we don't add it twice diff --git a/vendor/github.com/containers/podman/v5/libpod/lock/file/file_lock.go b/vendor/github.com/containers/podman/v5/libpod/lock/file/file_lock.go index 6e7dda545..67dd35a3a 100644 --- a/vendor/github.com/containers/podman/v5/libpod/lock/file/file_lock.go +++ b/vendor/github.com/containers/podman/v5/libpod/lock/file/file_lock.go @@ -1,7 +1,9 @@ package file import ( + "errors" "fmt" + "io/fs" "os" "path/filepath" "strconv" @@ -138,7 +140,9 @@ func (locks *FileLocks) DeallocateAllLocks() error { p := filepath.Join(locks.lockPath, f.Name()) err := os.Remove(p) if err != nil { - lastErr = err + if errors.Is(err, fs.ErrNotExist) { + continue + } logrus.Errorf("Deallocating lock %s", p) } } diff --git a/vendor/github.com/containers/podman/v5/libpod/networking_common.go b/vendor/github.com/containers/podman/v5/libpod/networking_common.go index daf840df0..bac0d4862 100644 --- a/vendor/github.com/containers/podman/v5/libpod/networking_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_common.go @@ -204,7 +204,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e } settings := new(define.InspectNetworkSettings) - settings.Ports = makeInspectPorts(c.config.PortMappings, c.config.ExposedPorts) + settings.Ports = makeInspectPortBindings(c.config.PortMappings) networks, err := c.networks() if err != nil { diff --git a/vendor/github.com/containers/podman/v5/libpod/networking_linux.go b/vendor/github.com/containers/podman/v5/libpod/networking_linux.go index 9ffdc1d0c..688985b5e 100644 --- a/vendor/github.com/containers/podman/v5/libpod/networking_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_linux.go @@ -229,9 +229,13 @@ func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBloc } var gateway net.IP for _, route := range routes { - // default gateway - if route.Dst == nil { - gateway = route.Gw + // add default gateway + // Dst is set to 0.0.0.0/0 or ::/0 which is the default route + if route.Dst != nil && route.Dst.IP.IsUnspecified() { + ones, _ := route.Dst.Mask.Size() + if ones == 0 { + gateway = route.Gw + } } } result.Interfaces = make(map[string]types.NetInterface) diff --git a/vendor/github.com/containers/podman/v5/libpod/networking_pasta_linux.go 
b/vendor/github.com/containers/podman/v5/libpod/networking_pasta_linux.go index 7934bc591..89724514b 100644 --- a/vendor/github.com/containers/podman/v5/libpod/networking_pasta_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_pasta_linux.go @@ -12,7 +12,7 @@ package libpod import "github.com/containers/common/libnetwork/pasta" func (r *Runtime) setupPasta(ctr *Container, netns string) error { - res, err := pasta.Setup2(&pasta.SetupOptions{ + res, err := pasta.Setup(&pasta.SetupOptions{ Config: r.config, Netns: netns, Ports: ctr.convertPortMappings(), diff --git a/vendor/github.com/containers/podman/v5/libpod/oci.go b/vendor/github.com/containers/podman/v5/libpod/oci.go index a10056779..e0d740633 100644 --- a/vendor/github.com/containers/podman/v5/libpod/oci.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci.go @@ -29,8 +29,6 @@ type OCIRuntime interface { //nolint:interfacebloat // the given container if it is a restore and if restoreOptions.PrintStats // is true. In all other cases the returned int64 is 0. CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) - // UpdateContainerStatus updates the status of the given container. - UpdateContainerStatus(ctr *Container) error // StartContainer starts the given container. StartContainer(ctr *Container) error // KillContainer sends the given signal to the given container. diff --git a/vendor/github.com/containers/podman/v5/libpod/oci_conmon_common.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_common.go index a3e5a24b0..8aa103fb4 100644 --- a/vendor/github.com/containers/podman/v5/libpod/oci_conmon_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_common.go @@ -34,6 +34,7 @@ import ( "github.com/containers/podman/v5/pkg/specgenutil" "github.com/containers/podman/v5/pkg/util" "github.com/containers/podman/v5/utils" + "github.com/containers/storage/pkg/idtools" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -172,18 +173,20 @@ func hasCurrentUserMapped(ctr *Container) bool { if len(ctr.config.IDMappings.UIDMap) == 0 && len(ctr.config.IDMappings.GIDMap) == 0 { return true } - uid := os.Geteuid() - for _, m := range ctr.config.IDMappings.UIDMap { - if uid >= m.HostID && uid < m.HostID+m.Size { - return true + containsID := func(id int, mappings []idtools.IDMap) bool { + for _, m := range mappings { + if id >= m.HostID && id < m.HostID+m.Size { + return true + } } + return false } - return false + return containsID(os.Geteuid(), ctr.config.IDMappings.UIDMap) && containsID(os.Getegid(), ctr.config.IDMappings.GIDMap) } // CreateContainer creates a container. func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { - if !hasCurrentUserMapped(ctr) { + if !hasCurrentUserMapped(ctr) || ctr.config.RootfsMapping != nil { // if we are running a non privileged container, be sure to umount some kernel paths so they are not // bind mounted inside the container at all. hideFiles := !ctr.config.Privileged && !rootless.IsRootless() @@ -192,91 +195,6 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta return r.createOCIContainer(ctr, restoreOptions) } -// UpdateContainerStatus retrieves the current status of the container from the -// runtime. It updates the container's state but does not save it. 
-// If useRuntime is false, we will not directly hit runc to see the container's -// status, but will instead only check for the existence of the conmon exit file -// and update state to stopped if it exists. -func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - - // Store old state so we know if we were already stopped - oldState := ctr.state.State - - state := new(spec.State) - - cmd := exec.Command(r.path, "state", ctr.ID()) - cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) - - outPipe, err := cmd.StdoutPipe() - if err != nil { - return fmt.Errorf("getting stdout pipe: %w", err) - } - errPipe, err := cmd.StderrPipe() - if err != nil { - return fmt.Errorf("getting stderr pipe: %w", err) - } - - err = cmd.Start() - if err != nil { - return fmt.Errorf("error launching container runtime: %w", err) - } - defer func() { - _ = cmd.Wait() - }() - - stderr, err := io.ReadAll(errPipe) - if err != nil { - return fmt.Errorf("reading stderr: %s: %w", ctr.ID(), err) - } - if strings.Contains(string(stderr), "does not exist") || strings.Contains(string(stderr), "No such file") { - if err := ctr.removeConmonFiles(); err != nil { - logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) - } - ctr.state.ExitCode = -1 - ctr.state.FinishedTime = time.Now() - ctr.state.State = define.ContainerStateExited - return ctr.runtime.state.AddContainerExitCode(ctr.ID(), ctr.state.ExitCode) - } - if err := errPipe.Close(); err != nil { - return err - } - - out, err := io.ReadAll(outPipe) - if err != nil { - return fmt.Errorf("reading stdout: %s: %w", ctr.ID(), err) - } - if err := json.NewDecoder(bytes.NewReader(out)).Decode(state); err != nil { - return fmt.Errorf("decoding container status for container %s: %w", ctr.ID(), err) - } - ctr.state.PID = state.Pid - - switch state.Status { - case "created": - ctr.state.State = define.ContainerStateCreated - case "paused": - ctr.state.State = define.ContainerStatePaused - case "running": - ctr.state.State = define.ContainerStateRunning - case "stopped": - ctr.state.State = define.ContainerStateStopped - default: - return fmt.Errorf("unrecognized status returned by runtime for container %s: %s: %w", - ctr.ID(), state.Status, define.ErrInternal) - } - - // Handle ContainerStateStopping - keep it unless the container - // transitioned to no longer running. - if oldState == define.ContainerStateStopping && (ctr.state.State == define.ContainerStatePaused || ctr.state.State == define.ContainerStateRunning) { - ctr.state.State = define.ContainerStateStopping - } - - return nil -} - // StartContainer starts the given container. // Sets time the container was started, but does not save it. func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error { @@ -358,6 +276,8 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) // If captureStderr is requested, OCI runtime STDERR will be captured as a // *bytes.buffer and returned; otherwise, it is set to os.Stderr. +// IMPORTANT: This function is called from an unlocked container state in +// the stop() code path so do not modify the state here.
func (r *ConmonOCIRuntime) killContainer(ctr *Container, signal uint, all, captureStderr bool) (*bytes.Buffer, error) { logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID()) runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { @@ -381,15 +301,15 @@ func (r *ConmonOCIRuntime) killContainer(ctr *Container, signal uint, all, captu stderr = stderrBuffer } if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, stderr, env, r.path, args...); err != nil { - // Update container state - there's a chance we failed because - // the container exited in the meantime. - if err2 := r.UpdateContainerStatus(ctr); err2 != nil { - logrus.Infof("Error updating status for container %s: %v", ctr.ID(), err2) - } - if ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) { - return stderrBuffer, fmt.Errorf("%w: %s", define.ErrCtrStateInvalid, ctr.state.State) + rErr := err + // quick check if ctr pid is still alive + if err := unix.Kill(ctr.state.PID, 0); err == unix.ESRCH { + // pid already dead so signal sending fails logically, set error to ErrCtrStateInvalid + // This is needed for the ProxySignals() function which already ignores the error to not cause flakes + // when the ctr process just exited. + rErr = define.ErrCtrStateInvalid } - return stderrBuffer, fmt.Errorf("sending signal to container %s: %w", ctr.ID(), err) + return stderrBuffer, fmt.Errorf("sending signal to container %s: %w", ctr.ID(), rErr) } return stderrBuffer, nil
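killContainer() above replaces the removed UpdateContainerStatus() round trip with a direct liveness probe: sending signal 0 runs the kernel's existence and permission checks without delivering a signal, so no locked container state is needed. A self-contained sketch:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// pidAlive reports whether a process with the given PID exists: signal 0
// performs the checks without actually signalling the process.
func pidAlive(pid int) bool {
	return unix.Kill(pid, 0) != unix.ESRCH
}

func main() {
	fmt.Println(pidAlive(os.Getpid())) // true
	fmt.Println(pidAlive(1 << 30))     // false on any normal pid_max
}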
@@ -401,6 +321,8 @@ func (r *ConmonOCIRuntime) killContainer(ctr *Container, signal uint, all, captu // immediately kill with SIGKILL. // Does not set finished time for container, assumes you will run updateStatus // after to pull the exit code. +// IMPORTANT: This function is called from an unlocked container state in +// the stop() code path so do not modify the state here. func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) error { logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID) @@ -1116,7 +1038,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co args = append(args, "--no-pivot") } - exitCommand, err := specgenutil.CreateExitCommandArgs(ctr.runtime.storageConfig, ctr.runtime.config, ctr.runtime.syslog || logrus.IsLevelEnabled(logrus.DebugLevel), ctr.AutoRemove(), false) + exitCommand, err := specgenutil.CreateExitCommandArgs(ctr.runtime.storageConfig, ctr.runtime.config, ctr.runtime.syslog || logrus.IsLevelEnabled(logrus.DebugLevel), ctr.AutoRemove(), ctr.AutoRemoveImage(), false) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/podman/v5/libpod/oci_missing.go b/vendor/github.com/containers/podman/v5/libpod/oci_missing.go index 0ac6417f5..98eb91ef8 100644 --- a/vendor/github.com/containers/podman/v5/libpod/oci_missing.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_missing.go @@ -76,11 +76,6 @@ func (r *MissingRuntime) CreateContainer(ctr *Container, restoreOptions *Contain return 0, r.printError() } -// UpdateContainerStatus is not available as the runtime is missing -func (r *MissingRuntime) UpdateContainerStatus(ctr *Container) error { - return r.printError() -} - // StartContainer is not available as the runtime is missing func (r *MissingRuntime) StartContainer(ctr *Container) error { return r.printError() } diff --git a/vendor/github.com/containers/podman/v5/libpod/options.go b/vendor/github.com/containers/podman/v5/libpod/options.go index 9f30f2f32..2569f9fd1 100644 --- a/vendor/github.com/containers/podman/v5/libpod/options.go +++ b/vendor/github.com/containers/podman/v5/libpod/options.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" "net" + "os" + "path/filepath" "strings" "syscall" "time" @@ -1065,7 +1067,7 @@ func WithDependencyCtrs(ctrs []*Container) CtrCreateOption { // namespace with a minimal configuration. // An optional array of port mappings can be provided. // Conflicts with WithNetNSFrom(). -func WithNetNS(portMappings []nettypes.PortMapping, exposedPorts map[uint16][]string, postConfigureNetNS bool, netmode string, networks map[string]nettypes.PerNetworkOptions) CtrCreateOption { +func WithNetNS(portMappings []nettypes.PortMapping, postConfigureNetNS bool, netmode string, networks map[string]nettypes.PerNetworkOptions) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { return define.ErrCtrFinalized @@ -1075,7 +1077,6 @@ func WithNetNS(portMappings []nettypes.PortMapping, exposedPorts map[uint16][]st ctr.config.NetMode = namespaces.NetworkMode(netmode) ctr.config.CreateNetNS = true ctr.config.PortMappings = portMappings - ctr.config.ExposedPorts = exposedPorts if !ctr.config.NetMode.IsBridge() && len(networks) > 0 { return errors.New("cannot use networks when network mode is not bridge") @@ -1086,6 +1087,20 @@ func WithNetNS(portMappings []nettypes.PortMapping, exposedPorts map[uint16][]st } } +// WithExposedPorts includes a set of ports that were exposed by the image in +// the container config, e.g. for display when the container is inspected. +func WithExposedPorts(exposedPorts map[uint16][]string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + ctr.config.ExposedPorts = exposedPorts + + return nil + } +} + // WithNetworkOptions sets additional options for the networks.
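WithExposedPorts() above, like WithNetworkOptions() just below, follows the CtrCreateOption shape used throughout options.go: each option is a closure that mutates the container config and refuses to run once the container is finalized. A stripped-down sketch with illustrative stand-in types, not libpod's real ones:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for libpod's Container and CtrCreateOption.
type container struct {
	valid        bool
	exposedPorts map[uint16][]string
}

type ctrCreateOption func(*container) error

// withExposedPorts mirrors the shape of WithExposedPorts() above.
func withExposedPorts(ports map[uint16][]string) ctrCreateOption {
	return func(c *container) error {
		if c.valid {
			return errors.New("container already finalized")
		}
		c.exposedPorts = ports
		return nil
	}
}

func main() {
	c := &container{}
	opts := []ctrCreateOption{withExposedPorts(map[uint16][]string{80: {"tcp"}})}
	for _, opt := range opts {
		if err := opt(c); err != nil {
			panic(err)
		}
	}
	fmt.Println(c.exposedPorts)
}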
func WithNetworkOptions(options map[string][]string) CtrCreateOption { return func(ctr *Container) error { @@ -1500,6 +1515,57 @@ func WithHealthCheck(healthCheck *manifest.Schema2HealthConfig) CtrCreateOption } } +// WithHealthCheckLogDestination adds the healthLogDestination to the container config +func WithHealthCheckLogDestination(destination string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + switch destination { + case define.HealthCheckEventsLoggerDestination, define.DefaultHealthCheckLocalDestination: + ctr.config.HealthLogDestination = destination + default: + fileInfo, err := os.Stat(destination) + if err != nil { + return fmt.Errorf("HealthCheck Log '%s' destination error: %w", destination, err) + } + mode := fileInfo.Mode() + if !mode.IsDir() { + return fmt.Errorf("HealthCheck Log '%s' destination must be directory", destination) + } + + absPath, err := filepath.Abs(destination) + if err != nil { + return err + } + ctr.config.HealthLogDestination = absPath + } + return nil + } +} + +// WithHealthCheckMaxLogCount adds the healthMaxLogCount to the container config +func WithHealthCheckMaxLogCount(maxLogCount uint) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + ctr.config.HealthMaxLogCount = maxLogCount + return nil + } +} + +// WithHealthCheckMaxLogSize adds the healthMaxLogSize to the container config +func WithHealthCheckMaxLogSize(maxLogSize uint) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + ctr.config.HealthMaxLogSize = maxLogSize + return nil + } +} + // WithHealthCheckOnFailureAction adds an on-failure action to health-check config func WithHealthCheckOnFailureAction(action define.HealthCheckOnFailureAction) CtrCreateOption { return func(ctr *Container) error { diff --git a/vendor/github.com/containers/podman/v5/libpod/pod_api.go b/vendor/github.com/containers/podman/v5/libpod/pod_api.go index 9b0cd5c32..f0effef84 100644 --- a/vendor/github.com/containers/podman/v5/libpod/pod_api.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_api.go @@ -180,7 +180,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m } if cleanup { - err := c.Cleanup(ctx) + err := c.Cleanup(ctx, false) if err != nil && !errors.Is(err, define.ErrCtrStateInvalid) && !errors.Is(err, define.ErrCtrStopped) { return err } @@ -262,7 +262,10 @@ func (p *Pod) stopIfOnlyInfraRemains(ctx context.Context, ignoreID string) error } } - _, err = p.stopWithTimeout(ctx, true, -1) + errs, err := p.stopWithTimeout(ctx, true, -1) + for ctr, e := range errs { + logrus.Errorf("Failed to stop container %s: %v", ctr, e) + } return err } @@ -296,7 +299,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) { c := ctr logrus.Debugf("Adding parallel job to clean up container %s", c.ID()) retChan := parallel.Enqueue(ctx, func() error { - return c.Cleanup(ctx) + return c.Cleanup(ctx, false) }) ctrErrChan[c.ID()] = retChan diff --git a/vendor/github.com/containers/podman/v5/libpod/runtime.go b/vendor/github.com/containers/podman/v5/libpod/runtime.go index 73c607ad4..1daf2855b 100644 --- a/vendor/github.com/containers/podman/v5/libpod/runtime.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime.go @@ -218,11 +218,6 @@ func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...R if runtime.store != nil { _, _ = runtime.store.Shutdown(false) } - // For 
`systemctl stop podman.service` support, exit code should be 0 - if sig == syscall.SIGTERM { - os.Exit(0) - } - os.Exit(1) return nil }); err != nil && !errors.Is(err, shutdown.ErrHandlerExists) { logrus.Errorf("Registering shutdown handler for libpod: %v", err) @@ -1398,3 +1393,7 @@ func (r *Runtime) SystemCheck(ctx context.Context, options entities.SystemCheckO return report, err } + +func (r *Runtime) GetContainerExitCode(id string) (int32, error) { + return r.state.GetContainerExitCode(id) +} diff --git a/vendor/github.com/containers/podman/v5/libpod/runtime_ctr.go b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr.go index a3f26cc52..87d848478 100644 --- a/vendor/github.com/containers/podman/v5/libpod/runtime_ctr.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr.go @@ -576,7 +576,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai } if ctr.runtime.config.Engine.EventsContainerCreateInspectData { - if err := ctr.newContainerEventWithInspectData(events.Create, "", true); err != nil { + if err := ctr.newContainerEventWithInspectData(events.Create, define.HealthCheckResults{}, true); err != nil { return nil, err } } else { @@ -1412,13 +1412,13 @@ func (r *Runtime) IsStorageContainerMounted(id string) (bool, string, error) { mountCnt, err := r.storageService.MountedContainerImage(id) if err != nil { - return false, "", err + return false, "", fmt.Errorf("get mount count of container: %w", err) } mounted := mountCnt > 0 if mounted { path, err = r.storageService.GetMountpoint(id) if err != nil { - return false, "", err + return false, "", fmt.Errorf("get container mount point: %w", err) } } return mounted, path, nil diff --git a/vendor/github.com/containers/podman/v5/libpod/runtime_volume.go b/vendor/github.com/containers/podman/v5/libpod/runtime_volume.go index 6066145f0..ff04eec3c 100644 --- a/vendor/github.com/containers/podman/v5/libpod/runtime_volume.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_volume.go @@ -28,13 +28,6 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeo return define.ErrRuntimeStopped } - if !v.valid { - if ok, _ := r.state.HasVolume(v.Name()); !ok { - // Volume probably already removed - // Or was never in the runtime to begin with - return nil - } - } return r.removeVolume(ctx, v, force, timeout, false) } diff --git a/vendor/github.com/containers/podman/v5/libpod/runtime_volume_common.go b/vendor/github.com/containers/podman/v5/libpod/runtime_volume_common.go index 44d4f667d..f215667c1 100644 --- a/vendor/github.com/containers/podman/v5/libpod/runtime_volume_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_volume_common.go @@ -201,7 +201,11 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti Inodes: volume.config.Inodes, Size: volume.config.Size, } - if err := q.SetQuota(fullVolPath, quota); err != nil { + // Must use volPathRoot not fullVolPath, as we need the + // base path for the volume - without the `_data` + // subdirectory - so the quota ID assignment logic works + // properly. 
+ if err := q.SetQuota(volPathRoot, quota); err != nil { return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err) } } @@ -357,13 +361,11 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo return define.ErrVolumeRemoved } - v.lock.Lock() - defer v.lock.Unlock() - - // Update volume status to pick up a potential removal from state - if err := v.update(); err != nil { - return err - } + // DANGEROUS: Do not lock here yet because we might need to remove containers first. + // In general we must always acquire the ctr lock before a volume lock so we cannot lock. + // THIS MUST BE DONE to prevent ABBA deadlocks. + // It also means there are several races around creating containers with volumes and removing + // them in parallel. However that problem exists regardless of taking the lock here or not. deps, err := r.state.VolumeInUse(v) if err != nil { @@ -400,6 +402,15 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo } } + // Ok now we are good to lock. + v.lock.Lock() + defer v.lock.Unlock() + + // Update volume status to pick up a potential removal from state + if err := v.update(); err != nil { + return err + } + // If the volume is still mounted - force unmount it if err := v.unmount(true); err != nil { if force { diff --git a/vendor/github.com/containers/podman/v5/libpod/shutdown/handler.go b/vendor/github.com/containers/podman/v5/libpod/shutdown/handler.go index d7fd5b532..e06e818c7 100644 --- a/vendor/github.com/containers/podman/v5/libpod/shutdown/handler.go +++ b/vendor/github.com/containers/podman/v5/libpod/shutdown/handler.go @@ -28,10 +28,18 @@ var ( shutdownInhibit sync.RWMutex logrus = logrusImport.WithField("PID", os.Getpid()) ErrNotStarted = errors.New("shutdown signal handler has not yet been started") + // exitCode used to exit once we are done with all signal handlers, by default 1 + exitCode = 1 ) +// SetExitCode sets the exit code used when we exit after running all shutdown handlers; it should be positive. +func SetExitCode(i int) { + exitCode = i +} + // Start begins handling SIGTERM and SIGINT and will run the given on-signal -// handlers when one is called. This can be cancelled by calling Stop(). +// handlers when one is called and then exit with exit code 1 if not +// overridden with SetExitCode(). This can be cancelled by calling Stop(). func Start() error { if sigChan != nil { // Already running, do nothing. @@ -75,6 +83,7 @@ func Start() error { } handlerLock.Unlock() shutdownInhibit.Unlock() + os.Exit(exitCode) return } }() @@ -131,29 +140,3 @@ func Register(name string, handler func(os.Signal) error) error { return nil } - -// Unregister un-registers a given shutdown handler.
-func Unregister(name string) error { - handlerLock.Lock() - defer handlerLock.Unlock() - - if handlers == nil { - return nil - } - - if _, ok := handlers[name]; !ok { - return nil - } - - delete(handlers, name) - - newOrder := []string{} - for _, checkName := range handlerOrder { - if checkName != name { - newOrder = append(newOrder, checkName) - } - } - handlerOrder = newOrder - - return nil -} diff --git a/vendor/github.com/containers/podman/v5/libpod/sqlite_state.go b/vendor/github.com/containers/podman/v5/libpod/sqlite_state.go index f51cdc3d7..1e979727d 100644 --- a/vendor/github.com/containers/podman/v5/libpod/sqlite_state.go +++ b/vendor/github.com/containers/podman/v5/libpod/sqlite_state.go @@ -2157,6 +2157,7 @@ func (s *SQLiteState) Volume(name string) (*Volume, error) { if errors.Is(err, sql.ErrNoRows) { return nil, define.ErrNoSuchVolume } + return nil, fmt.Errorf("querying volume %s: %w", name, err) } vol := new(Volume) diff --git a/vendor/github.com/containers/podman/v5/libpod/stats_linux.go b/vendor/github.com/containers/podman/v5/libpod/stats_linux.go index 36f861aa3..c2f53fc16 100644 --- a/vendor/github.com/containers/podman/v5/libpod/stats_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/stats_linux.go @@ -3,6 +3,7 @@ package libpod import ( + "errors" "fmt" "strings" "syscall" @@ -36,6 +37,11 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev // Ubuntu does not have swap memory in cgroups because swap is often not enabled. cgroupStats, err := cgroup.Stat() if err != nil { + // cgroup.Stat() is not an atomic operation, so it is possible that the cgroup is removed + // while Stat() is running. Try to catch this case and return a more specific error. + if (errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENODEV)) && !cgroupExist(cgroupPath) { + return fmt.Errorf("cgroup %s does not exist: %w", cgroupPath, define.ErrCtrStopped) + } return fmt.Errorf("unable to obtain cgroup stats: %w", err) } conState := c.state.State diff --git a/vendor/github.com/containers/podman/v5/libpod/util.go b/vendor/github.com/containers/podman/v5/libpod/util.go index 21d864d98..49c9f10c2 100644 --- a/vendor/github.com/containers/podman/v5/libpod/util.go +++ b/vendor/github.com/containers/podman/v5/libpod/util.go @@ -223,13 +223,8 @@ func writeHijackHeader(r *http.Request, conn io.Writer, tty bool) { } } -// Convert OCICNI port bindings into Inspect-formatted port bindings. +// Generate inspect-formatted port mappings from the format used in our config file func makeInspectPortBindings(bindings []types.PortMapping) map[string][]define.InspectHostPort { - return makeInspectPorts(bindings, nil) -} - -// Convert OCICNI port bindings into Inspect-formatted port bindings with exposed, but not bound ports set to nil. -func makeInspectPorts(bindings []types.PortMapping, expose map[uint16][]string) map[string][]define.InspectHostPort { portBindings := make(map[string][]define.InspectHostPort) for _, port := range bindings { protocols := strings.Split(port.Protocol, ",") @@ -249,7 +244,12 @@ func makeInspectPorts(bindings []types.PortMapping, expose map[uint16][]string) } } } - // add exposed ports without host port information to match docker + + return portBindings +} + +// Add exposed ports to inspect port bindings. These must be done on a per-container basis, not per-netns basis. 
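The helper below keys entries as "port/protocol" and gives exposed-but-unbound ports a nil entry, matching Docker's inspect output. A runnable sketch of that keying, with the map value type standing in for []define.InspectHostPort:

package main

import "fmt"

func main() {
	expose := map[uint16][]string{8080: {"tcp", "udp"}}
	// The value type stands in for []define.InspectHostPort.
	portBindings := map[string][]string{}
	for port, protocols := range expose {
		for _, protocol := range protocols {
			key := fmt.Sprintf("%d/%s", port, protocol)
			// Exposed-but-unbound ports get a nil entry, like Docker.
			if _, ok := portBindings[key]; !ok {
				portBindings[key] = nil
			}
		}
	}
	fmt.Println(portBindings)
}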
+func addInspectPortsExpose(expose map[uint16][]string, portBindings map[string][]define.InspectHostPort) { for port, protocols := range expose { for _, protocol := range protocols { key := fmt.Sprintf("%d/%s", port, protocol) @@ -258,7 +258,6 @@ func makeInspectPorts(bindings []types.PortMapping, expose map[uint16][]string) } } } - return portBindings } // Write a given string to a new file at a given path. diff --git a/vendor/github.com/containers/podman/v5/libpod/util_linux.go b/vendor/github.com/containers/podman/v5/libpod/util_linux.go index ac83857ee..574c58a01 100644 --- a/vendor/github.com/containers/podman/v5/libpod/util_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/util_linux.go @@ -120,7 +120,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, string, erro // When we run as rootless, the cgroup has a path like the following: ///sys/fs/cgroup/user.slice/user-@$UID.slice/user@$UID.service/user.slice/user-libpod_pod_$POD_ID.slice uid := rootless.GetRootlessUID() - raw := fmt.Sprintf("user.slice/%s-%d.slice/user@%d.service/%s/%s-%s%s", noSlice, uid, uid, baseSlice, noSlice, newSlice, sliceSuffix) + raw := fmt.Sprintf("user.slice/user-%d.slice/user@%d.service/%s/%s-%s%s", uid, uid, baseSlice, noSlice, newSlice, sliceSuffix) return raw, systemdPath, nil } return systemdPath, systemdPath, nil diff --git a/vendor/github.com/containers/podman/v5/pkg/autoupdate/autoupdate.go b/vendor/github.com/containers/podman/v5/pkg/autoupdate/autoupdate.go index f82090399..e3c54a266 100644 --- a/vendor/github.com/containers/podman/v5/pkg/autoupdate/autoupdate.go +++ b/vendor/github.com/containers/podman/v5/pkg/autoupdate/autoupdate.go @@ -480,7 +480,7 @@ func (u *updater) assembleImageMap(ctx context.Context) (map[string]*libimage.Im listOptions := &libimage.ListImagesOptions{ Filters: []string{"readonly=false"}, } - imagesSlice, err := u.runtime.LibimageRuntime().ListImages(ctx, nil, listOptions) + imagesSlice, err := u.runtime.LibimageRuntime().ListImages(ctx, listOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/connection.go b/vendor/github.com/containers/podman/v5/pkg/bindings/connection.go index 496e46915..0fce6c9a2 100644 --- a/vendor/github.com/containers/podman/v5/pkg/bindings/connection.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/connection.go @@ -1,6 +1,7 @@ package bindings import ( + "bytes" "context" "errors" "fmt" @@ -9,6 +10,7 @@ import ( "net/http" "net/url" "os" + "os/user" "strconv" "strings" "time" @@ -16,6 +18,7 @@ import ( "github.com/blang/semver/v4" "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v5/version" + "github.com/kevinburke/ssh_config" "github.com/sirupsen/logrus" "golang.org/x/net/proxy" ) @@ -86,7 +89,7 @@ func NewConnection(ctx context.Context, uri string) (context.Context, error) { // A valid URI connection should be scheme:// // For example tcp://localhost: // or unix:///run/podman/podman.sock -// or ssh://@[:port]/run/podman/podman.sock?secure=True +// or ssh://@[:port]/run/podman/podman.sock func NewConnectionWithIdentity(ctx context.Context, uri string, identity string, machine bool) (context.Context, error) { var ( err error @@ -108,30 +111,11 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string, var connection Connection switch _url.Scheme { case "ssh": - port := 22 - if _url.Port() != "" { - port, err = strconv.Atoi(_url.Port()) - if err != nil { - return nil, err - } - } - conn, err := 
ssh.Dial(&ssh.ConnectionDialOptions{ - Host: uri, - Identity: identity, - User: _url.User, - Port: port, - InsecureIsMachineConnection: machine, - }, "golang") + conn, err := sshClient(_url, uri, identity, machine) if err != nil { - return nil, newConnectError(err) + return nil, err } - connection = Connection{URI: _url} - connection.Client = &http.Client{ - Transport: &http.Transport{ - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - return ssh.DialNet(conn, "unix", _url) - }, - }} + connection = conn case "unix": if !strings.HasPrefix(uri, "unix:///") { // autofix unix://path_element vs unix:///path_element @@ -161,6 +145,98 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string, return ctx, nil } +func sshClient(_url *url.URL, uri string, identity string, machine bool) (Connection, error) { + var ( + err error + ) + connection := Connection{ + URI: _url, + } + userinfo := _url.User + if _url.User == nil { + u, err := user.Current() + if err != nil { + return connection, fmt.Errorf("current user could not be determined: %w", err) + } + userinfo = url.User(u.Username) + } + port := 22 + if _url.Port() != "" { + port, err = strconv.Atoi(_url.Port()) + if err != nil { + return connection, err + } + } + // ssh_config + alias := _url.Hostname() + cfg := ssh_config.DefaultUserSettings + found := false + if val := cfg.Get(alias, "User"); val != "" { + userinfo = url.User(val) + found = true + } + if val := cfg.Get(alias, "Hostname"); val != "" { + uri = val + found = true + } + if val := cfg.Get(alias, "Port"); val != "" { + if val != ssh_config.Default("Port") { + port, err = strconv.Atoi(val) + if err != nil { + return connection, fmt.Errorf("port is not an int: %s: %w", val, err) + } + found = true + } + } + if val := cfg.Get(alias, "IdentityFile"); val != "" { + if val != ssh_config.Default("IdentityFile") { + identity = strings.Trim(val, "\"") + found = true + } + } + if found { + logrus.Debugf("ssh_config alias found: %s", alias) + logrus.Debugf(" User: %s", userinfo.Username()) + logrus.Debugf(" Hostname: %s", uri) + logrus.Debugf(" Port: %d", port) + logrus.Debugf(" IdentityFile: %q", identity) + } + conn, err := ssh.Dial(&ssh.ConnectionDialOptions{ + Host: uri, + Identity: identity, + User: userinfo, + Port: port, + InsecureIsMachineConnection: machine, + }, ssh.GolangMode) + if err != nil { + return connection, newConnectError(err) + } + if _url.Path == "" { + session, err := conn.NewSession() + if err != nil { + return connection, err + } + defer session.Close() + + var b bytes.Buffer + session.Stdout = &b + if err := session.Run( + "podman info --format '{{.Host.RemoteSocket.Path}}'"); err != nil { + return connection, err + } + val := strings.TrimSuffix(b.String(), "\n") + _url.Path = val + } + dialContext := func(ctx context.Context, _, _ string) (net.Conn, error) { + return ssh.DialNet(conn, "unix", _url) + } + connection.Client = &http.Client{ + Transport: &http.Transport{ + DialContext: dialContext, + }} + return connection, nil +} + func tcpClient(_url *url.URL) (Connection, error) { connection := Connection{ URI: _url, @@ -276,7 +352,12 @@ func (c *Connection) DoRequest(ctx context.Context, httpBody io.Reader, httpMeth params[i+1] = url.PathEscape(pv) } - uri := fmt.Sprintf("http://d/v%s/libpod"+endpoint, params...) 
+ baseURL := "http://d" + if c.URI.Scheme == "tcp" { + // Allow path prefixes for tcp connections to match Docker behavior + baseURL = "http://" + c.URI.Host + c.URI.Path + } + uri := fmt.Sprintf(baseURL+"/v%s/libpod"+endpoint, params...) logrus.Debugf("DoRequest Method: %s URI: %v", httpMethod, uri) req, err := http.NewRequestWithContext(ctx, httpMethod, uri, httpBody) diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/containers/containers.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/containers.go index 116ed9187..5665db211 100644 --- a/vendor/github.com/containers/podman/v5/pkg/bindings/containers/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/containers.go @@ -405,6 +405,9 @@ func Stop(ctx context.Context, nameOrID string, options *StopOptions) error { return err } defer response.Body.Close() + if options.GetIgnore() && response.StatusCode == http.StatusNotFound { + return nil + } return response.Process(nil) } diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/images/build.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/build.go index 1ac370aae..11ba58c6d 100644 --- a/vendor/github.com/containers/podman/v5/pkg/bindings/images/build.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/build.go @@ -22,6 +22,7 @@ import ( "github.com/containers/podman/v5/pkg/auth" "github.com/containers/podman/v5/pkg/bindings" "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/ioutils" @@ -49,6 +50,42 @@ type BuildResponse struct { Aux json.RawMessage `json:"aux,omitempty"` } +// Modify the build contexts that use a local windows path. The windows path is +// converted into the corresponding guest path in the default Windows machine +// (e.g. C:\test ==> /mnt/c/test). +func convertAdditionalBuildContexts(additionalBuildContexts map[string]*define.AdditionalBuildContext) { + for _, context := range additionalBuildContexts { + if !context.IsImage && !context.IsURL { + path, err := specgen.ConvertWinMountPath(context.Value) + // It's not worth failing if the path can't be converted + if err == nil { + context.Value = path + } + } + } +} +
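convertAdditionalBuildContexts() above delegates the drive-letter mapping (C:\test ==> /mnt/c/test) to specgen.ConvertWinMountPath. An illustrative reduction of just that core idea; the real helper also copes with UNC and relative paths:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// winToWSLPath is a hypothetical, simplified stand-in for
// specgen.ConvertWinMountPath: map "X:\..." to "/mnt/x/...".
func winToWSLPath(p string) string {
	if len(p) > 1 && p[1] == ':' && unicode.IsLetter(rune(p[0])) {
		drive := strings.ToLower(p[:1])
		rest := strings.ReplaceAll(p[2:], `\`, "/")
		return "/mnt/" + drive + rest
	}
	return p
}

func main() {
	fmt.Println(winToWSLPath(`C:\test`)) // /mnt/c/test
}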
+// convertVolumeSrcPath converts windows paths in the HOST-DIR part of a volume +// into the corresponding path in the default Windows machine. +// (e.g. C:\test:/src/docs ==> /mnt/c/test:/src/docs). +// If any error occurs while parsing the volume string, the original volume +// string is returned. +func convertVolumeSrcPath(volume string) string { + splitVol := specgen.SplitVolumeString(volume) + if len(splitVol) < 2 || len(splitVol) > 3 { + return volume + } + convertedSrcPath, err := specgen.ConvertWinMountPath(splitVol[0]) + if err != nil { + return volume + } + if len(splitVol) == 2 { + return convertedSrcPath + ":" + splitVol[1] + } else { + return convertedSrcPath + ":" + splitVol[1] + ":" + splitVol[2] + } +} + // Build creates an image using a containerfile reference func Build(ctx context.Context, containerFiles []string, options types.BuildOptions) (*types.BuildReport, error) { if options.CommonBuildOpts == nil { @@ -90,6 +127,10 @@ func Build(ctx context.Context, containerFiles []string, options types.BuildOpti params.Add("t", tag) } if additionalBuildContexts := options.AdditionalBuildContexts; len(additionalBuildContexts) > 0 { + // TODO: Additional build contexts should be packaged and sent as tar files + // For the time being we do our best to make them accessible on remote + // machines too (i.e. on macOS and Windows). + convertAdditionalBuildContexts(additionalBuildContexts) additionalBuildContextMap, err := jsoniter.Marshal(additionalBuildContexts) if err != nil { return nil, err @@ -239,6 +280,10 @@ func Build(ctx context.Context, containerFiles []string, options types.BuildOpti if len(options.RusageLogFile) > 0 { params.Set("rusagelogfile", options.RusageLogFile) } + + params.Set("retry", strconv.Itoa(options.MaxPullPushRetries)) + params.Set("retry-delay", options.PullPushRetryDelay.String()) + if len(options.Manifest) > 0 { params.Set("manifest", options.Manifest) } @@ -332,7 +377,7 @@ func Build(ctx context.Context, containerFiles []string, options types.BuildOpti } for _, volume := range options.CommonBuildOpts.Volumes { - params.Add("volume", volume) + params.Add("volume", convertVolumeSrcPath(volume)) } for _, group := range options.GroupAdd { diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/images/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types.go index 7621f8cd7..0db6d6f02 100644 --- a/vendor/github.com/containers/podman/v5/pkg/bindings/images/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types.go @@ -93,6 +93,8 @@ type PruneOptions struct { All *bool // Prune images even when they're used by external containers External *bool + // Prune persistent build cache + BuildCache *bool // Filters to apply when pruning images Filters map[string][]string } diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_prune_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_prune_options.go index ead74e7ea..bbd12a169 100644 --- a/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_prune_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_prune_options.go @@ -47,6 +47,21 @@ func (o *PruneOptions) GetExternal() bool { return *o.External } +// WithBuildCache set field BuildCache to given value +func (o *PruneOptions) WithBuildCache(value bool) *PruneOptions { + o.BuildCache = &value + return o +} + +// GetBuildCache returns value of field BuildCache +func (o *PruneOptions) GetBuildCache() bool { + if o.BuildCache == nil { + var z bool + return z + } + return *o.BuildCache +} + // WithFilters set field Filters to given value func (o *PruneOptions) WithFilters(value map[string][]string) *PruneOptions { o.Filters = value diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/system/system.go
b/vendor/github.com/containers/podman/v5/pkg/bindings/system/system.go index a41bfb1f8..b2eacd4a6 100644 --- a/vendor/github.com/containers/podman/v5/pkg/bindings/system/system.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/system.go @@ -3,9 +3,7 @@ package system import ( "context" "encoding/json" - "errors" "fmt" - "io" "net/http" "time" @@ -31,7 +29,6 @@ func Events(ctx context.Context, eventChan chan types.Event, cancelChan chan boo if err != nil { return err } - defer response.Body.Close() if cancelChan != nil { go func() { @@ -43,26 +40,23 @@ func Events(ctx context.Context, eventChan chan types.Event, cancelChan chan boo } if response.StatusCode != http.StatusOK { + defer response.Body.Close() return response.Process(nil) } - dec := json.NewDecoder(response.Body) - for err = (error)(nil); err == nil; { - var e = types.Event{} - err = dec.Decode(&e) - if err == nil { - eventChan <- e + go func() { + defer response.Body.Close() + defer close(eventChan) + dec := json.NewDecoder(response.Body) + for err = (error)(nil); err == nil; { + var e = types.Event{} + err = dec.Decode(&e) + if err == nil { + eventChan <- e + } } - } - close(eventChan) - switch { - case err == nil: - return nil - case errors.Is(err, io.EOF): - return nil - default: - return fmt.Errorf("unable to decode event response: %w", err) - } + }() + return nil } // Prune removes all unused system data. diff --git a/vendor/github.com/containers/podman/v5/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/vendor/github.com/containers/podman/v5/pkg/checkpoint/crutils/checkpoint_restore_utils.go index 3736ea1e2..ad4ea9e27 100644 --- a/vendor/github.com/containers/podman/v5/pkg/checkpoint/crutils/checkpoint_restore_utils.go +++ b/vendor/github.com/containers/podman/v5/pkg/checkpoint/crutils/checkpoint_restore_utils.go @@ -229,7 +229,18 @@ func CRRuntimeSupportsCheckpointRestore(runtimePath string) bool { func CRRuntimeSupportsPodCheckpointRestore(runtimePath string) bool { cmd := exec.Command(runtimePath, "restore", "--lsm-mount-context") out, _ := cmd.CombinedOutput() - return bytes.Contains(out, []byte("flag needs an argument")) + + // check for runc + if bytes.Contains(out, []byte("flag needs an argument")) { + return true + } + + // check for crun + if bytes.Contains(out, []byte("requires an argument")) { + return true + } + + return false } // CRGetRuntimeFromArchive extracts the checkpoint metadata from the diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/containers.go index 24c1d78f8..105b09154 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/entities/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/containers.go @@ -371,6 +371,7 @@ type ContainerCleanupOptions struct { Latest bool Remove bool RemoveImage bool + StoppedOnly bool // Only cleanup if the container is stopped, i.e. 
was running before } // ContainerCleanupReport describes the response from a diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_image.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_image.go index 8179b9a67..65844f676 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_image.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_image.go @@ -3,8 +3,8 @@ package entities import ( "context" + "github.com/containers/common/libimage/define" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v5/pkg/domain/entities/reports" ) @@ -23,7 +23,7 @@ type ImageEngine interface { //nolint:interfacebloat Push(ctx context.Context, source string, destination string, opts ImagePushOptions) (*ImagePushReport, error) Remove(ctx context.Context, images []string, opts ImageRemoveOptions) (*ImageRemoveReport, []error) Save(ctx context.Context, nameOrID string, tags []string, options ImageSaveOptions) error - Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) error + Scp(ctx context.Context, src, dst string, opts ImageScpOptions) (*ImageScpReport, error) Search(ctx context.Context, term string, opts ImageSearchOptions) ([]ImageSearchReport, error) SetTrust(ctx context.Context, args []string, options SetTrustOptions) error ShowTrust(ctx context.Context, args []string, options ShowTrustOptions) (*ShowTrustReport, error) @@ -34,7 +34,7 @@ type ImageEngine interface { //nolint:interfacebloat Untag(ctx context.Context, nameOrID string, tags []string, options ImageUntagOptions) error ManifestCreate(ctx context.Context, name string, images []string, opts ManifestCreateOptions) (string, error) ManifestExists(ctx context.Context, name string) (*BoolReport, error) - ManifestInspect(ctx context.Context, name string, opts ManifestInspectOptions) ([]byte, error) + ManifestInspect(ctx context.Context, name string, opts ManifestInspectOptions) (*define.ManifestListData, error) ManifestAdd(ctx context.Context, listName string, imageNames []string, opts ManifestAddOptions) (string, error) ManifestAddArtifact(ctx context.Context, name string, files []string, opts ManifestAddArtifactOptions) (string, error) ManifestAnnotate(ctx context.Context, names, image string, opts ManifestAnnotateOptions) (string, error) diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/images.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/images.go index 9b3a40eb7..063a43b77 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/entities/images.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/images.go @@ -249,9 +249,10 @@ type ImageListOptions struct { } type ImagePruneOptions struct { - All bool `json:"all" schema:"all"` - External bool `json:"external" schema:"external"` - Filter []string `json:"filter" schema:"filter"` + All bool `json:"all" schema:"all"` + External bool `json:"external" schema:"external"` + BuildCache bool `json:"buildcache" schema:"buildcache"` + Filter []string `json:"filter" schema:"filter"` } type ImageTagOptions struct{} @@ -304,22 +305,14 @@ type ImageSaveOptions struct { SignaturePolicy string } -// ImageScpOptions provide options for securely copying images to and from a remote host +// ImageScpOptions provides options for ImageEngine.Scp() type ImageScpOptions struct { - // Remote determines if this entity is operating on a remote machine - Remote bool 
`json:"remote,omitempty"` - // File is the input/output file for the save and load Operation - File string `json:"file,omitempty"` - // Quiet Determines if the save and load operation will be done quietly - Quiet bool `json:"quiet,omitempty"` - // Image is the image the user is providing to save and load - Image string `json:"image,omitempty"` - // User is used in conjunction with Transfer to determine if a valid user was given to save from/load into - User string `json:"user,omitempty"` - // Tag is the name to be used for the image on the destination - Tag string `json:"tag,omitempty"` + ScpExecuteTransferOptions } +// ImageScpReport provides results from ImageEngine.Scp() +type ImageScpReport struct{} + // ImageScpConnections provides the ssh related information used in remote image transfer type ImageScpConnections struct { // Connections holds the raw string values for connections (ssh or unix) diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/machine.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/machine.go index 74c025db5..c6db89d3c 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/entities/machine.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/machine.go @@ -28,8 +28,14 @@ type MachineInfo struct { // MachineHostInfo contains info on the machine host type MachineHostInfo struct { - Arch string `json:"Arch"` - CurrentMachine string `json:"CurrentMachine"` + Arch string `json:"Arch"` + CurrentMachine string `json:"CurrentMachine"` + // TODO(6.0): Change `DefaultName` to `ActiveMachineConnection` to fix the address + // confusion shown in https://github.com/containers/podman/issues/23353. + // The name `DefaultMachine` can lead users to think that + // they can set a default podman machine via system connections. However, + // regardless of which system connection is default, the default podman machine + // will always be podman-machine-default.
DefaultMachine string `json:"DefaultMachine"` EventsDir string `json:"EventsDir"` MachineConfigDir string `json:"MachineConfigDir"` diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/pods.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/pods.go index 63b88e335..96c4a3bf6 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/entities/pods.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/pods.go @@ -134,135 +134,138 @@ const ( ) type ContainerCreateOptions struct { - Annotation []string - Attach []string - Authfile string - BlkIOWeight string - BlkIOWeightDevice []string - CapAdd []string - CapDrop []string - CgroupNS string - CgroupsMode string - CgroupParent string `json:"cgroup_parent,omitempty"` - CIDFile string - ConmonPIDFile string `json:"container_conmon_pidfile,omitempty"` - CPUPeriod uint64 - CPUQuota int64 - CPURTPeriod uint64 - CPURTRuntime int64 - CPUShares uint64 - CPUS float64 `json:"cpus,omitempty"` - CPUSetCPUs string `json:"cpuset_cpus,omitempty"` - CPUSetMems string - Devices []string `json:"devices,omitempty"` - DeviceCgroupRule []string - DeviceReadBPs []string `json:"device_read_bps,omitempty"` - DeviceReadIOPs []string - DeviceWriteBPs []string - DeviceWriteIOPs []string - Entrypoint *string `json:"container_command,omitempty"` - Env []string - EnvHost bool - EnvFile []string - Expose []string - GIDMap []string - GPUs []string - GroupAdd []string - HealthCmd string - HealthInterval string - HealthRetries uint - HealthStartPeriod string - HealthTimeout string - HealthOnFailure string - Hostname string `json:"hostname,omitempty"` - HTTPProxy bool - HostUsers []string - ImageVolume string - Init bool - InitContainerType string - InitPath string - IntelRdtClosID string - Interactive bool - IPC string - Label []string - LabelFile []string - LogDriver string - LogOptions []string - Memory string - MemoryReservation string - MemorySwap string - MemorySwappiness int64 - Name string `json:"container_name"` - NoHealthCheck bool - OOMKillDisable bool - OOMScoreAdj *int - Arch string - OS string - Variant string - PID string `json:"pid,omitempty"` - PIDsLimit *int64 - Platform string - Pod string - PodIDFile string - Personality string - PreserveFDs uint - PreserveFD []uint - Privileged bool - PublishAll bool - Pull string - Quiet bool - ReadOnly bool - ReadWriteTmpFS bool - Restart string - Replace bool - Requires []string - Retry *uint `json:"retry,omitempty"` - RetryDelay string `json:"retry_delay,omitempty"` - Rm bool - RootFS bool - Secrets []string - SecurityOpt []string `json:"security_opt,omitempty"` - SdNotifyMode string - ShmSize string - ShmSizeSystemd string - SignaturePolicy string - StartupHCCmd string - StartupHCInterval string - StartupHCRetries uint - StartupHCSuccesses uint - StartupHCTimeout string - StopSignal string - StopTimeout uint - StorageOpts []string - SubGIDName string - SubUIDName string - Sysctl []string `json:"sysctl,omitempty"` - Systemd string - Timeout uint - TLSVerify commonFlag.OptionalBool - TmpFS []string - TTY bool - Timezone string - Umask string - EnvMerge []string - UnsetEnv []string - UnsetEnvAll bool - UIDMap []string - Ulimit []string - User string - UserNS string `json:"-"` - UTS string - Mount []string - Volume []string `json:"volume,omitempty"` - VolumesFrom []string `json:"volumes_from,omitempty"` - Workdir string - SeccompPolicy string - PidFile string - ChrootDirs []string - IsInfra bool - IsClone bool - DecryptionKeys []string - Net *NetOptions 
`json:"net,omitempty"` + Annotation []string + Attach []string + Authfile string + BlkIOWeight string + BlkIOWeightDevice []string + CapAdd []string + CapDrop []string + CgroupNS string + CgroupsMode string + CgroupParent string `json:"cgroup_parent,omitempty"` + CIDFile string + ConmonPIDFile string `json:"container_conmon_pidfile,omitempty"` + CPUPeriod uint64 + CPUQuota int64 + CPURTPeriod uint64 + CPURTRuntime int64 + CPUShares uint64 + CPUS float64 `json:"cpus,omitempty"` + CPUSetCPUs string `json:"cpuset_cpus,omitempty"` + CPUSetMems string + Devices []string `json:"devices,omitempty"` + DeviceCgroupRule []string + DeviceReadBPs []string `json:"device_read_bps,omitempty"` + DeviceReadIOPs []string + DeviceWriteBPs []string + DeviceWriteIOPs []string + Entrypoint *string `json:"container_command,omitempty"` + Env []string + EnvHost bool + EnvFile []string + Expose []string + GIDMap []string + GPUs []string + GroupAdd []string + HealthCmd string + HealthInterval string + HealthRetries uint + HealthLogDestination string + HealthMaxLogCount uint + HealthMaxLogSize uint + HealthStartPeriod string + HealthTimeout string + HealthOnFailure string + Hostname string `json:"hostname,omitempty"` + HTTPProxy bool + HostUsers []string + ImageVolume string + Init bool + InitContainerType string + InitPath string + IntelRdtClosID string + Interactive bool + IPC string + Label []string + LabelFile []string + LogDriver string + LogOptions []string + Memory string + MemoryReservation string + MemorySwap string + MemorySwappiness int64 + Name string `json:"container_name"` + NoHealthCheck bool + OOMKillDisable bool + OOMScoreAdj *int + Arch string + OS string + Variant string + PID string `json:"pid,omitempty"` + PIDsLimit *int64 + Platform string + Pod string + PodIDFile string + Personality string + PreserveFDs uint + PreserveFD []uint + Privileged bool + PublishAll bool + Pull string + Quiet bool + ReadOnly bool + ReadWriteTmpFS bool + Restart string + Replace bool + Requires []string + Retry *uint `json:"retry,omitempty"` + RetryDelay string `json:"retry_delay,omitempty"` + Rm bool + RootFS bool + Secrets []string + SecurityOpt []string `json:"security_opt,omitempty"` + SdNotifyMode string + ShmSize string + ShmSizeSystemd string + SignaturePolicy string + StartupHCCmd string + StartupHCInterval string + StartupHCRetries uint + StartupHCSuccesses uint + StartupHCTimeout string + StopSignal string + StopTimeout uint + StorageOpts []string + SubGIDName string + SubUIDName string + Sysctl []string `json:"sysctl,omitempty"` + Systemd string + Timeout uint + TLSVerify commonFlag.OptionalBool + TmpFS []string + TTY bool + Timezone string + Umask string + EnvMerge []string + UnsetEnv []string + UnsetEnvAll bool + UIDMap []string + Ulimit []string + User string + UserNS string `json:"-"` + UTS string + Mount []string + Volume []string `json:"volume,omitempty"` + VolumesFrom []string `json:"volumes_from,omitempty"` + Workdir string + SeccompPolicy string + PidFile string + ChrootDirs []string + IsInfra bool + IsClone bool + DecryptionKeys []string + Net *NetOptions `json:"net,omitempty"` CgroupConf []string diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/scp.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/scp.go new file mode 100644 index 000000000..93b1629cd --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/scp.go @@ -0,0 +1,97 @@ +package entities + +import ( + "net/url" + + "github.com/containers/common/pkg/ssh" +) + +// 
ScpTransferImageOptions provides options for securely copying images to and from a remote host +type ScpTransferImageOptions struct { + // Remote determines if this entity is operating on a remote machine + Remote bool `json:"remote,omitempty"` + // File is the input/output file for the save and load operation + File string `json:"file,omitempty"` + // Quiet determines if the save and load operation will be done quietly + Quiet bool `json:"quiet,omitempty"` + // Image is the image the user is providing to save and load + Image string `json:"image,omitempty"` + // User is used in conjunction with Transfer to determine if a valid user was given to save from/load into + User string `json:"user,omitempty"` + // Tag is the name to be used for the image on the destination + Tag string `json:"tag,omitempty"` +} + +type ScpLoadReport = ImageLoadReport + +type ScpExecuteTransferOptions struct { + // ParentFlags are the arguments to apply to the parent podman command when called via ssh + ParentFlags []string + // Quiet determines if the save and load operation will be done quietly + Quiet bool + // SSHMode is the specified ssh.EngineMode which should be used + SSHMode ssh.EngineMode +} + +type ScpExecuteTransferReport struct { + // LoadReport provides results from calling podman load + LoadReport *ScpLoadReport + // Source contains data relating to the source of the image to transfer + Source *ScpTransferImageOptions + // Dest contains data relating to the destination of the image to transfer + Dest *ScpTransferImageOptions + // ParentFlags are the arguments to apply to the parent podman command when called via ssh + ParentFlags []string +} + +type ScpTransferOptions struct { + // ParentFlags are the arguments to apply to the parent podman command when called.
+ ParentFlags []string +} + +type ScpTransferReport struct{} + +type ScpLoadToRemoteOptions struct { + // Dest contains data relating to the destination of the image to transfer + Dest ScpTransferImageOptions + // LocalFile is a path to a local file containing saved image data to transfer + LocalFile string + // Tag is the name of the tag to be given to the loaded image (unused) + Tag string + // URL points to the remote location for loading to + URL *url.URL + // Iden is a path to an optional identity file with ssh key + Iden string + // SSHMode is the specified ssh.EngineMode which should be used + SSHMode ssh.EngineMode +} + +type ScpLoadToRemoteReport struct { + // Response contains any additional information from the executed load command + Response string + // ID is the identifier of the loaded image + ID string +} + +type ScpSaveToRemoteOptions struct { + Image string + // LocalFile is a path to a local file to copy the saved image to + LocalFile string + // Tag is the name of the tag to be given to the saved image (unused) + Tag string + // URL points to the remote location for saving from + URL *url.URL + // Iden is a path to an optional identity file with ssh key + Iden string + // SSHMode is the specified ssh.EngineMode which should be used + SSHMode ssh.EngineMode +} + +type ScpSaveToRemoteReport struct{} + +type ScpCreateCommandsOptions struct { + // ParentFlags are the arguments to apply to the parent podman command when called via ssh + ParentFlags []string + // Podman is the path to the local podman executable + Podman string +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types.go index 52901c03f..277460ed7 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types.go @@ -90,7 +90,7 @@ type DiffReport struct { type EventsOptions struct { FromStart bool - EventChan chan *events.Event + EventChan chan events.ReadResult Filter []string Stream bool Since string diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers.go index 3cbeb2cd6..0a0dbe335 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers.go @@ -35,6 +35,8 @@ import ( "github.com/containers/podman/v5/pkg/specgenutil" "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" + "github.com/containers/storage/types" + "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" ) @@ -290,15 +292,35 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin return err } } - err = c.Cleanup(ctx) - if err != nil { - // Issue #7384 and #11384: If the container is configured for - // auto-removal, it might already have been removed at this point. - // We still need to clean up since we do not know if the other cleanup process is successful - if c.AutoRemove() && (errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved)) { - return nil + if c.AutoRemove() { + _, imageName := c.Image() + + if err := ic.Libpod.RemoveContainer(ctx, c, false, true, nil); err != nil { + // Issue #7384 and #11384: If the container is configured for + // auto-removal, it might already have been removed at this point. 
+ // We still need to clean up since we do not know if the other cleanup process is successful + if !(errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved)) { + return err + } + } + + if c.AutoRemoveImage() { + imageEngine := ImageEngine{Libpod: ic.Libpod} + _, rmErrors := imageEngine.Remove(ctx, []string{imageName}, entities.ImageRemoveOptions{Ignore: true}) + if len(rmErrors) > 0 { + mErr := multierror.Append(nil, rmErrors...) + return fmt.Errorf("removing container %s image %s: %w", c.ID(), imageName, mErr) + } + } + } else { + if err = c.Cleanup(ctx, false); err != nil { + // The container could still have been removed, as we unlocked + // after we stopped it. + if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { + return nil + } + return err } - return err } return nil }) @@ -445,6 +467,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, errMap, err := parallelctr.ContainerOp(ctx, libpodContainers, func(c *libpod.Container) error { mapMutex.Lock() if _, ok := ctrsMap[c.ID()]; ok { + mapMutex.Unlock() return nil } mapMutex.Unlock() @@ -826,7 +849,7 @@ func makeExecConfig(options entities.ExecOptions, rt *libpod.Runtime) (*libpod.E return nil, fmt.Errorf("retrieving Libpod configuration to build exec exit command: %w", err) } // TODO: Add some ability to toggle syslog - exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, true) + exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, false, true) if err != nil { return nil, fmt.Errorf("constructing exit command for exec session: %w", err) } @@ -1267,6 +1290,10 @@ func (ic *ContainerEngine) ContainerLogs(ctx context.Context, namesOrIds []strin func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []string, options entities.ContainerCleanupOptions) ([]*entities.ContainerCleanupReport, error) { containers, err := getContainers(ic.Libpod, getContainersOptions{all: options.All, latest: options.Latest, names: namesOrIds}) if err != nil { + // cleanup command spawned by conmon lost race as another process already removed the ctr + if errors.Is(err, define.ErrNoSuchCtr) { + return nil, nil + } return nil, err } reports := []*entities.ContainerCleanupReport{} @@ -1276,13 +1303,13 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st if options.Exec != "" { if options.Remove { - if err := ctr.ExecRemove(options.Exec, false); err != nil { - return nil, err - } + err = ctr.ExecRemove(options.Exec, false) } else { - if err := ctr.ExecCleanup(options.Exec); err != nil { - return nil, err - } + err = ctr.ExecCleanup(options.Exec) + } + // If ErrNoSuchExecSession then the exec session was already removed so do not report an error. 
+ if err != nil && !errors.Is(err, define.ErrNoSuchExecSession) { + return nil, err } return []*entities.ContainerCleanupReport{}, nil } @@ -1290,12 +1317,13 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st if options.Remove && !ctr.ShouldRestart(ctx) { var timeout *uint err = ic.Libpod.RemoveContainer(ctx, ctr.Container, false, true, timeout) - if err != nil { + if err != nil && !errors.Is(err, define.ErrNoSuchCtr) { report.RmErr = fmt.Errorf("failed to clean up and remove container %v: %w", ctr.ID(), err) } } else { - err := ctr.Cleanup(ctx) - if err != nil { + err := ctr.Cleanup(ctx, options.StoppedOnly) + // ignore error if ctr is removed or cannot be cleaned up, likely the ctr was already restarted by another process + if err != nil && !errors.Is(err, define.ErrNoSuchCtr) && !errors.Is(err, define.ErrCtrStateInvalid) { report.CleanErr = fmt.Errorf("failed to clean up container %v: %w", ctr.ID(), err) } } @@ -1303,7 +1331,7 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st if options.RemoveImage { _, imageName := ctr.Image() imageEngine := ImageEngine{Libpod: ic.Libpod} - _, rmErrors := imageEngine.Remove(ctx, []string{imageName}, entities.ImageRemoveOptions{}) + _, rmErrors := imageEngine.Remove(ctx, []string{imageName}, entities.ImageRemoveOptions{Ignore: true}) report.RmiErr = errorhandling.JoinErrors(rmErrors) } @@ -1368,6 +1396,11 @@ func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIDs []strin for _, ctr := range containers { report := entities.ContainerMountReport{Id: ctr.ID()} report.Path, report.Err = ctr.Mount() + if options.All && + (errors.Is(report.Err, define.ErrNoSuchCtr) || + errors.Is(report.Err, define.ErrCtrRemoved)) { + continue + } reports = append(reports, &report) } if len(reports) > 0 { @@ -1382,7 +1415,14 @@ func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIDs []strin for _, sctr := range storageCtrs { mounted, path, err := ic.Libpod.IsStorageContainerMounted(sctr.ID) if err != nil { - return nil, err + // ErrCtrExists means this is a libpod container, we handle that below. + // This can only happen in a narrow race because we first create the storage + // container and then the libpod container so the StorageContainers() call + // above would need to happen in that interval. 
+ if errors.Is(err, types.ErrContainerUnknown) || errors.Is(err, types.ErrLayerUnknown) || errors.Is(err, define.ErrCtrExists) { + continue + } + return nil, fmt.Errorf("check if storage container is mounted: %w", err) } var name string @@ -1406,7 +1446,11 @@ func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIDs []strin for _, ctr := range containers { mounted, path, err := ctr.Mounted() if err != nil { - return nil, err + if errors.Is(err, define.ErrNoSuchCtr) || + errors.Is(err, define.ErrCtrRemoved) { + continue + } + return nil, fmt.Errorf("check if container is mounted: %w", err) } if mounted { @@ -1715,6 +1759,10 @@ func (ic *ContainerEngine) ContainerClone(ctx context.Context, ctrCloneOpts enti spec.Name = generate.CheckName(ic.Libpod, n, true) } + spec.HealthLogDestination = define.DefaultHealthCheckLocalDestination + spec.HealthMaxLogCount = define.DefaultHealthMaxLogCount + spec.HealthMaxLogSize = define.DefaultHealthMaxLogSize + rtSpec, spec, opts, err := generate.MakeContainer(context.Background(), ic.Libpod, spec, true, c) if err != nil { return nil, err diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/generate.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/generate.go index d58fb0323..58e602013 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/generate.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/generate.go @@ -244,6 +244,16 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, return nil, err } typeContent = append(typeContent, b) + case define.K8sKindJob: + job, err := libpod.GenerateForKubeJob(ctx, libpod.ConvertV1PodToYAMLPod(po), options) + if err != nil { + return nil, err + } + b, err := generateKubeYAML(job) + if err != nil { + return nil, err + } + typeContent = append(typeContent, b) case define.K8sKindPod: b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po)) if err != nil { @@ -251,7 +261,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, } typeContent = append(typeContent, b) default: - return nil, fmt.Errorf("invalid generation type - only pods, deployments and daemonsets are currently supported: %+v", options.Type) + return nil, fmt.Errorf("invalid generation type - only pods, deployments, jobs, and daemonsets are currently supported: %+v", options.Type) } if options.Service { @@ -311,6 +321,16 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener return nil, nil, err } out = append(out, b) + case define.K8sKindJob: + job, err := libpod.GenerateForKubeJob(ctx, libpod.ConvertV1PodToYAMLPod(po), options) + if err != nil { + return nil, nil, err + } + b, err := generateKubeYAML(job) + if err != nil { + return nil, nil, err + } + out = append(out, b) case define.K8sKindPod: b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po)) if err != nil { @@ -318,7 +338,7 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener } out = append(out, b) default: - return nil, nil, fmt.Errorf("invalid generation type - only pods, deployments and daemonsets are currently supported") + return nil, nil, fmt.Errorf("invalid generation type - only pods, deployments, jobs, and daemonsets are currently supported") } if options.Service { diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images.go index 16b8b662a..04e456571 100644 --- 
a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images.go @@ -13,16 +13,17 @@ import ( "os/user" "path" "path/filepath" + "slices" "strconv" "strings" "syscall" "time" bdefine "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/volumes" "github.com/containers/common/libimage" "github.com/containers/common/libimage/filter" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -37,6 +38,7 @@ import ( "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/storage" + "github.com/containers/storage/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -106,6 +108,13 @@ func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOption numPreviouslyRemovedImages = numRemovedImages } + if opts.BuildCache || opts.All { + // Clean build cache if any + if err := volumes.CleanCacheMount(); err != nil { + return nil, err + } + } + return pruneReports, nil } @@ -145,8 +154,28 @@ func (ir *ImageEngine) History(ctx context.Context, nameOrID string, opts entiti } func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entities.ImageMountOptions) ([]*entities.ImageMountReport, error) { - if opts.All && len(nameOrIDs) > 0 { + listMountsOnly := false + var images []*libimage.Image + var err error + switch { + case opts.All && len(nameOrIDs) > 0: return nil, errors.New("cannot mix --all with images") + case len(nameOrIDs) > 0: + images, err = ir.Libpod.LibimageRuntime().ListImagesByNames(nameOrIDs) + if err != nil { + return nil, err + } + default: + listImagesOptions := &libimage.ListImagesOptions{} + if opts.All { + listImagesOptions.Filters = []string{"readonly=false"} + } else { + listMountsOnly = true + } + images, err = ir.Libpod.LibimageRuntime().ListImages(ctx, listImagesOptions) + if err != nil { + return nil, err + } } if os.Geteuid() != 0 { @@ -165,17 +194,7 @@ func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entit } } - listImagesOptions := &libimage.ListImagesOptions{} - if opts.All { - listImagesOptions.Filters = []string{"readonly=false"} - } - images, err := ir.Libpod.LibimageRuntime().ListImages(ctx, nameOrIDs, listImagesOptions) - if err != nil { - return nil, err - } - mountReports := []*entities.ImageMountReport{} - listMountsOnly := !opts.All && len(nameOrIDs) == 0 for _, i := range images { var mountPoint string var err error @@ -183,6 +202,9 @@ func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entit // We're only looking for mounted images. mountPoint, err = i.Mountpoint() if err != nil { + if errors.Is(err, types.ErrImageUnknown) { + continue + } return nil, err } // Not mounted, so skip. 
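The Mount rework above reflects an API change in containers/common v0.61.0: libimage's ListImages no longer accepts a name list, so callers now choose between ListImagesByNames and a filtered ListImages. A minimal standalone sketch of that resolution pattern follows; the runtime value rt, the names slice, and the all flag are placeholders, not part of the vendored code.

package sketch

import (
	"context"
	"errors"

	"github.com/containers/common/libimage"
)

// resolveImages mirrors the switch used by Mount/Unmount above: explicit
// names go through ListImagesByNames, everything else through ListImages,
// filtered to writable images when --all is requested.
func resolveImages(ctx context.Context, rt *libimage.Runtime, names []string, all bool) ([]*libimage.Image, error) {
	switch {
	case all && len(names) > 0:
		return nil, errors.New("cannot mix --all with images")
	case len(names) > 0:
		return rt.ListImagesByNames(names)
	default:
		opts := &libimage.ListImagesOptions{}
		if all {
			opts.Filters = []string{"readonly=false"}
		}
		return rt.ListImages(ctx, opts)
	}
}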
@@ -211,17 +233,25 @@ func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entit } func (ir *ImageEngine) Unmount(ctx context.Context, nameOrIDs []string, options entities.ImageUnmountOptions) ([]*entities.ImageUnmountReport, error) { - if options.All && len(nameOrIDs) > 0 { + var images []*libimage.Image + var err error + switch { + case options.All && len(nameOrIDs) > 0: return nil, errors.New("cannot mix --all with images") - } - - listImagesOptions := &libimage.ListImagesOptions{} - if options.All { - listImagesOptions.Filters = []string{"readonly=false"} - } - images, err := ir.Libpod.LibimageRuntime().ListImages(ctx, nameOrIDs, listImagesOptions) - if err != nil { - return nil, err + case len(nameOrIDs) > 0: + images, err = ir.Libpod.LibimageRuntime().ListImagesByNames(nameOrIDs) + if err != nil { + return nil, err + } + default: + listImagesOptions := &libimage.ListImagesOptions{} + if options.All { + listImagesOptions.Filters = []string{"readonly=false"} + } + images, err = ir.Libpod.LibimageRuntime().ListImages(ctx, listImagesOptions) + if err != nil { + return nil, err + } } unmountReports := []*entities.ImageUnmountReport{} @@ -742,38 +772,42 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie return nil, nil } -func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) error { - rep, source, dest, flags, err := domainUtils.ExecuteTransfer(src, dst, parentFlags, quiet, sshMode) +func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, opts entities.ImageScpOptions) (*entities.ImageScpReport, error) { + report, err := domainUtils.ExecuteTransfer(src, dst, opts.ScpExecuteTransferOptions) if err != nil { - return err + return nil, err } - if (rep == nil && err == nil) && (source != nil && dest != nil) { // we need to execute the transfer - err := Transfer(ctx, *source, *dest, flags) + if (report.LoadReport == nil && err == nil) && (report.Source != nil && report.Dest != nil) { // we need to execute the transfer + transferOpts := entities.ScpTransferOptions{} + transferOpts.ParentFlags = report.ParentFlags + _, err := Transfer(ctx, *report.Source, *report.Dest, transferOpts) if err != nil { - return err + return nil, err } } - return nil + return &entities.ImageScpReport{}, nil } -func Transfer(ctx context.Context, source entities.ImageScpOptions, dest entities.ImageScpOptions, parentFlags []string) error { +func Transfer(ctx context.Context, source entities.ScpTransferImageOptions, dest entities.ScpTransferImageOptions, opts entities.ScpTransferOptions) (*entities.ScpTransferReport, error) { if source.User == "" { - return fmt.Errorf("you must define a user when transferring from root to rootless storage: %w", define.ErrInvalidArg) + return nil, fmt.Errorf("you must define a user when transferring from root to rootless storage: %w", define.ErrInvalidArg) } podman, err := os.Executable() if err != nil { - return err + return nil, err } + rep := entities.ScpTransferReport{} if rootless.IsRootless() && (len(dest.User) == 0 || dest.User == "root") { // if we are rootless and do not have a destination user we can just use sudo - return transferRootless(source, dest, podman, parentFlags) + return &rep, transferRootless(source, dest, podman, opts.ParentFlags) } - return transferRootful(source, dest, podman, parentFlags) + return &rep, transferRootful(source, dest, podman, opts.ParentFlags) } // TransferRootless creates new podman processes using exec.Command and 
sudo, transferring images between the given source and destination users -func transferRootless(source entities.ImageScpOptions, dest entities.ImageScpOptions, podman string, parentFlags []string) error { +func transferRootless(source entities.ScpTransferImageOptions, dest entities.ScpTransferImageOptions, podman string, parentFlags []string) error { var cmdSave *exec.Cmd - saveCommand, loadCommand := parentFlags, parentFlags + saveCommand := slices.Clone(parentFlags) + loadCommand := slices.Clone(parentFlags) saveCommand = append(saveCommand, []string{"save"}...) loadCommand = append(loadCommand, []string{"load"}...) if source.Quiet { @@ -812,7 +846,7 @@ func transferRootless(source entities.ImageScpOptions, dest entities.ImageScpOpt } // transferRootful creates new podman processes using exec.Command and a new uid/gid alongside a cleared environment -func transferRootful(source entities.ImageScpOptions, dest entities.ImageScpOptions, podman string, parentFlags []string) error { +func transferRootful(source entities.ScpTransferImageOptions, dest entities.ScpTransferImageOptions, podman string, parentFlags []string) error { basicCommand := make([]string, 0, len(parentFlags)+1) basicCommand = append(basicCommand, podman) basicCommand = append(basicCommand, parentFlags...) diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images_list.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images_list.go index 9fa855035..ad3dc328b 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images_list.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images_list.go @@ -24,7 +24,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) listImagesOptions.Filters = append(listImagesOptions.Filters, "intermediate=false") } - images, err := ir.Libpod.LibimageRuntime().ListImages(ctx, nil, listImagesOptions) + images, err := ir.Libpod.LibimageRuntime().ListImages(ctx, listImagesOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/manifest.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/manifest.go index 812c8e1ae..253e17347 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/manifest.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/manifest.go @@ -3,7 +3,6 @@ package abi import ( - "bytes" "context" "encoding/json" "errors" @@ -14,6 +13,7 @@ import ( "strings" "github.com/containers/common/libimage" + "github.com/containers/common/libimage/define" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/manifest" @@ -81,7 +81,7 @@ func (ir *ImageEngine) ManifestExists(ctx context.Context, name string) (*entiti } // ManifestInspect returns the content of a manifest list or image -func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts entities.ManifestInspectOptions) ([]byte, error) { +func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts entities.ManifestInspectOptions) (*define.ManifestListData, error) { // NOTE: we have to do a bit of a limbo here as `podman manifest // inspect foo` wants to do a remote-inspect of foo iff "foo" in the // containers storage is an ordinary image but not a manifest list. 
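The slices.Clone change in transferRootless above fixes a classic Go slice-aliasing bug: `saveCommand, loadCommand := parentFlags, parentFlags` made both variables share one backing array, so an append through one name could overwrite an element appended through the other whenever spare capacity existed. A self-contained sketch of the failure mode and the fix (the flag values are illustrative):

package main

import (
	"fmt"
	"slices"
)

func main() {
	parentFlags := make([]string, 1, 4) // spare capacity is what triggers the bug
	parentFlags[0] = "--log-level=debug"

	// Buggy: both variables alias the same backing array.
	saveCommand, loadCommand := parentFlags, parentFlags
	saveCommand = append(saveCommand, "save")
	loadCommand = append(loadCommand, "load") // overwrites "save" in the shared array
	fmt.Println(saveCommand[1])               // prints "load", not "save"

	// Fixed, as in transferRootless above: clone before appending.
	saveCommand = append(slices.Clone(parentFlags), "save")
	loadCommand = append(slices.Clone(parentFlags), "load")
	fmt.Println(saveCommand[1], loadCommand[1]) // prints "save load"
}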
@@ -97,25 +97,12 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts en return nil, err } - schema2List, err := manifestList.Inspect() - if err != nil { - return nil, err - } - - rawSchema2List, err := json.Marshal(schema2List) - if err != nil { - return nil, err - } - - var b bytes.Buffer - if err := json.Indent(&b, rawSchema2List, "", " "); err != nil { - return nil, fmt.Errorf("rendering manifest %s for display: %w", name, err) - } - return b.Bytes(), nil + return manifestList.Inspect() } // inspect a remote manifest list. -func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string, opts entities.ManifestInspectOptions) ([]byte, error) { +func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string, opts entities.ManifestInspectOptions) (*define.ManifestListData, error) { + inspectList := define.ManifestListData{} sys := ir.Libpod.SystemContext() if opts.Authfile != "" { @@ -136,7 +123,6 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string, o latestErr error result []byte manType string - b bytes.Buffer ) appendErr := func(e error) { if latestErr == nil { @@ -183,27 +169,24 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string, o if err != nil { return nil, fmt.Errorf("parsing manifest blob %q as a %q: %w", string(result), manType, err) } + if result, err = schema2Manifest.Serialize(); err != nil { return nil, err } default: - listBlob, err := manifest.ListFromBlob(result, manType) + list, err := manifest.ListFromBlob(result, manType) if err != nil { return nil, fmt.Errorf("parsing manifest blob %q as a %q: %w", string(result), manType, err) } - list, err := listBlob.ConvertToMIMEType(manifest.DockerV2ListMediaType) - if err != nil { - return nil, err - } if result, err = list.Serialize(); err != nil { return nil, err } } - if err = json.Indent(&b, result, "", " "); err != nil { - return nil, fmt.Errorf("rendering manifest %s for display: %w", name, err) + if err := json.Unmarshal(result, &inspectList); err != nil { + return nil, err } - return b.Bytes(), nil + return &inspectList, nil } // ManifestAdd adds images to the manifest list diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play.go index e5c24b1d2..8319fd9ed 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play.go @@ -84,8 +84,11 @@ func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri ReadOnly: true, ReadWriteTmpFS: false, // No need to spin up slirp etc. - Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}}, - StopTimeout: rtc.Engine.StopTimeout, + Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}}, + StopTimeout: rtc.Engine.StopTimeout, + HealthLogDestination: define.DefaultHealthCheckLocalDestination, + HealthMaxLogCount: define.DefaultHealthMaxLogCount, + HealthMaxLogSize: define.DefaultHealthMaxLogSize, } // Create and fill out the runtime spec. @@ -391,6 +394,22 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options } notifyProxies = append(notifyProxies, proxies...) + report.Pods = append(report.Pods, r.Pods...) 
+ validKinds++ + ranContainers = true + case "Job": + var jobYAML v1.Job + + if err := yaml.Unmarshal(document, &jobYAML); err != nil { + return nil, fmt.Errorf("unable to read YAML as Kube Job: %w", err) + } + + r, proxies, err := ic.playKubeJob(ctx, &jobYAML, options, &ipIndex, configMaps, serviceContainer) + if err != nil { + return nil, err + } + notifyProxies = append(notifyProxies, proxies...) + report.Pods = append(report.Pods, r.Pods...) validKinds++ ranContainers = true @@ -549,7 +568,35 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM return &report, proxies, nil } +func (ic *ContainerEngine) playKubeJob(ctx context.Context, jobYAML *v1.Job, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) { + var ( + jobName string + podSpec v1.PodTemplateSpec + report entities.PlayKubeReport + ) + + jobName = jobYAML.ObjectMeta.Name + if jobName == "" { + return nil, nil, errors.New("job does not have a name") + } + podSpec = jobYAML.Spec.Template + + podName := fmt.Sprintf("%s-pod", jobName) + podReport, proxies, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, jobYAML.Annotations, configMaps, serviceContainer) + if err != nil { + return nil, nil, fmt.Errorf("encountered while bringing up pod %s: %w", podName, err) + } + report.Pods = podReport.Pods + + return &report, proxies, nil +} + func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) { + cfg, err := ic.Libpod.GetConfigNoCopy() + if err != nil { + return nil, nil, err + } + var ( writer io.Writer playKubePod entities.PlayKubePod @@ -605,10 +652,15 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY if options.Userns == "" { if v, ok := annotations[define.UserNsAnnotation]; ok { options.Userns = v + } else if v, ok := annotations[define.UserNsAnnotation+"/"+podName]; ok { + options.Userns = v + } else if podYAML.Spec.HostUsers != nil && !*podYAML.Spec.HostUsers { + options.Userns = "auto" } else { options.Userns = "host" } - if podYAML.Spec.HostUsers != nil && !*podYAML.Spec.HostUsers { + // FIXME: how to deal with explicit mappings? 
+ if options.Userns == "private" { options.Userns = "auto" } } else if podYAML.Spec.HostUsers != nil { @@ -751,6 +803,21 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY return nil, nil, err } } + } else if v.Type == kube.KubeVolumeTypeImage { + var cwd string + if options.ContextDir != "" { + cwd = options.ContextDir + } else { + cwd, err = os.Getwd() + if err != nil { + return nil, nil, err + } + } + + _, err := ic.buildOrPullImage(ctx, cwd, writer, v.Source, v.ImagePullPolicy, options) + if err != nil { + return nil, nil, err + } } } @@ -772,7 +839,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY } if podOpt.Infra { - infraImage := util.DefaultContainerConfig().Engine.InfraImage + infraImage := cfg.Engine.InfraImage infraOptions := entities.NewInfraContainerCreateOptions() infraOptions.Hostname = podSpec.PodSpecGen.PodBasicConfig.Hostname infraOptions.ReadOnly = true @@ -845,11 +912,6 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY } } - cfg, err := ic.Libpod.GetConfigNoCopy() - if err != nil { - return nil, nil, err - } - var readOnly types.OptionalBool if cfg.Containers.ReadOnly { readOnly = types.NewOptionalBool(cfg.Containers.ReadOnly) @@ -1124,19 +1186,18 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY return &report, sdNotifyProxies, nil } -// getImageAndLabelInfo returns the image information and how the image should be pulled plus as well as labels to be used for the container in the pod. -// Moved this to a separate function so that it can be used for both init and regular containers when playing a kube yaml. -func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, annotations map[string]string, writer io.Writer, container v1.Container, options entities.PlayKubeOptions) (*libimage.Image, map[string]string, error) { - // Contains all labels obtained from kube - labels := make(map[string]string) - var pulledImage *libimage.Image - buildFile, err := getBuildFile(container.Image, cwd) +// buildImageFromContainerfile builds the container image and returns its details if these conditions are met: +// - A folder with the name of the image exists in current directory +// - A Dockerfile or Containerfile exists in that folder +// - The image doesn't exist locally OR the user explicitly provided the option `--build` +func (ic *ContainerEngine) buildImageFromContainerfile(ctx context.Context, cwd string, writer io.Writer, image string, options entities.PlayKubeOptions) (*libimage.Image, error) { + buildFile, err := getBuildFile(image, cwd) if err != nil { - return nil, nil, err + return nil, err } - existsLocally, err := ic.Libpod.LibimageRuntime().Exists(container.Image) + existsLocally, err := ic.Libpod.LibimageRuntime().Exists(image) if err != nil { - return nil, nil, err + return nil, err } if (len(buildFile) > 0) && ((!existsLocally && options.Build != types.OptionalBoolFalse) || (options.Build == types.OptionalBoolTrue)) { buildOpts := new(buildahDefine.BuildOptions) @@ -1144,56 +1205,91 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, buildOpts.ConfigureNetwork = buildahDefine.NetworkDefault isolation, err := bparse.IsolationOption("") if err != nil { - return nil, nil, err + return nil, err } buildOpts.Isolation = isolation buildOpts.CommonBuildOpts = commonOpts buildOpts.SystemContext = options.SystemContext - buildOpts.Output = container.Image + buildOpts.Output = image 
buildOpts.ContextDirectory = filepath.Dir(buildFile) buildOpts.ReportWriter = writer if _, _, err := ic.Libpod.Build(ctx, *buildOpts, []string{buildFile}...); err != nil { - return nil, nil, err + return nil, err } - i, _, err := ic.Libpod.LibimageRuntime().LookupImage(container.Image, new(libimage.LookupImageOptions)) + builtImage, _, err := ic.Libpod.LibimageRuntime().LookupImage(image, new(libimage.LookupImageOptions)) if err != nil { - return nil, nil, err + return nil, err + } + return builtImage, nil + } + return nil, nil +} + +// pullImageWithPolicy invokes libimage.Pull() to pull an image with the given PullPolicy. +// If the PullPolicy is not set: +// - use PullPolicyNewer if the image tag is set to "latest" or is not set +// - otherwise use PullPolicyMissing. +func (ic *ContainerEngine) pullImageWithPolicy(ctx context.Context, writer io.Writer, image string, policy v1.PullPolicy, options entities.PlayKubeOptions) (*libimage.Image, error) { + pullPolicy := config.PullPolicyMissing + if len(policy) > 0 { + // Make sure to lower the strings since K8s pull policy + // may be capitalized (see bugzilla.redhat.com/show_bug.cgi?id=1985905). + rawPolicy := string(policy) + parsedPolicy, err := config.ParsePullPolicy(strings.ToLower(rawPolicy)) + if err != nil { + return nil, err } - pulledImage = i + pullPolicy = parsedPolicy } else { - pullPolicy := config.PullPolicyMissing - if len(container.ImagePullPolicy) > 0 { - // Make sure to lower the strings since K8s pull policy - // may be capitalized (see bugzilla.redhat.com/show_bug.cgi?id=1985905). - rawPolicy := string(container.ImagePullPolicy) - pullPolicy, err = config.ParsePullPolicy(strings.ToLower(rawPolicy)) - if err != nil { - return nil, nil, err + if named, err := reference.ParseNamed(image); err == nil { + tagged, isTagged := named.(reference.NamedTagged) + if !isTagged || tagged.Tag() == "latest" { + // Make sure to always pull the latest image in case it got updated.
- pullPolicy = config.PullPolicyNewer - } - } - } - // This ensures the image is the image store - pullOptions := &libimage.PullOptions{} - pullOptions.AuthFilePath = options.Authfile - pullOptions.CertDirPath = options.CertDir - pullOptions.SignaturePolicyPath = options.SignaturePolicy - pullOptions.Writer = writer - pullOptions.Username = options.Username - pullOptions.Password = options.Password - pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify - - pulledImages, err := ic.Libpod.LibimageRuntime().Pull(ctx, container.Image, pullPolicy, pullOptions) - if err != nil { - return nil, nil, err } - pulledImage = pulledImages[0] + } + // This ensures the image is in the image store + pullOptions := &libimage.PullOptions{} + pullOptions.AuthFilePath = options.Authfile + pullOptions.CertDirPath = options.CertDir + pullOptions.SignaturePolicyPath = options.SignaturePolicy + pullOptions.Writer = writer + pullOptions.Username = options.Username + pullOptions.Password = options.Password + pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify + + pulledImages, err := ic.Libpod.LibimageRuntime().Pull(ctx, image, pullPolicy, pullOptions) + if err != nil { + return nil, err + } + return pulledImages[0], err +} + +// buildOrPullImage builds the image if a Containerfile is present in a directory +// with the name of the image. It pulls the image otherwise. It returns the image +// details. +func (ic *ContainerEngine) buildOrPullImage(ctx context.Context, cwd string, writer io.Writer, image string, policy v1.PullPolicy, options entities.PlayKubeOptions) (*libimage.Image, error) { + buildImage, err := ic.buildImageFromContainerfile(ctx, cwd, writer, image, options) + if err != nil { + return nil, err + } + if buildImage != nil { + return buildImage, nil + } else { + return ic.pullImageWithPolicy(ctx, writer, image, policy, options) + } +} + +// getImageAndLabelInfo returns the image information and how the image should be pulled, as well as labels to be used for the container in the pod. +// Moved this to a separate function so that it can be used for both init and regular containers when playing a kube yaml. +func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, annotations map[string]string, writer io.Writer, container v1.Container, options entities.PlayKubeOptions) (*libimage.Image, map[string]string, error) { + // Contains all labels obtained from kube + labels := make(map[string]string) + + pulledImage, err := ic.buildOrPullImage(ctx, cwd, writer, container.Image, container.ImagePullPolicy, options) + if err != nil { + return nil, labels, err } // Handle kube annotations @@ -1497,7 +1593,7 @@ func sortKubeKinds(documentList [][]byte) ([][]byte, error) { } switch kind { - case "Pod", "Deployment", "DaemonSet": + case "Pod", "Deployment", "DaemonSet", "Job": sortedDocumentList = append(sortedDocumentList, document) default: sortedDocumentList = append([][]byte{document}, sortedDocumentList...)
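The defaulting rules that pullImageWithPolicy encodes above are easy to miss in the refactor: an explicit K8s policy is lowercased and parsed, while an unset policy falls back to PullPolicyNewer only for ":latest" or untagged references, and to PullPolicyMissing otherwise. A minimal sketch of just that decision; choosePullPolicy is a hypothetical helper, not the vendored function:

package sketch

import (
	"strings"

	"github.com/containers/common/pkg/config"
	"github.com/containers/image/v5/docker/reference"
)

// choosePullPolicy isolates the defaulting rules described above.
func choosePullPolicy(image string, k8sPolicy string) (config.PullPolicy, error) {
	if k8sPolicy != "" {
		// K8s policies may be capitalized ("Always", "IfNotPresent", ...),
		// so lowercase before parsing.
		return config.ParsePullPolicy(strings.ToLower(k8sPolicy))
	}
	pull := config.PullPolicyMissing
	if named, err := reference.ParseNamed(image); err == nil {
		tagged, isTagged := named.(reference.NamedTagged)
		if !isTagged || tagged.Tag() == "latest" {
			// Moving tags must be re-checked on every play.
			pull = config.PullPolicyNewer
		}
	}
	return pull, nil
}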
@@ -1628,6 +1724,15 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, opt } podName := fmt.Sprintf("%s-pod", deploymentName) podNames = append(podNames, podName) + case "Job": + var jobYAML v1.Job + + if err := yaml.Unmarshal(document, &jobYAML); err != nil { + return nil, fmt.Errorf("unable to read YAML as Kube Job: %w", err) + } + jobName := jobYAML.ObjectMeta.Name + podName := fmt.Sprintf("%s-pod", jobName) + podNames = append(podNames, podName) case "PersistentVolumeClaim": var pvcYAML v1.PersistentVolumeClaim if err := yaml.Unmarshal(document, &pvcYAML); err != nil { diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system.go index 440d81e48..88a6cf01a 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system.go @@ -10,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/domain/entities" @@ -48,19 +49,16 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) { info.Host.RemoteSocket.Path = uri.Path } - uri, err := url.Parse(ic.Libpod.RemoteURI()) - if err != nil { - return nil, err - } - - if uri.Scheme == "unix" { - err := fileutils.Exists(uri.Path) + // check if the unix path exists; if it is not a unix socket (e.g. a tcp socket) we always assume it exists + path, found := strings.CutPrefix(info.Host.RemoteSocket.Path, "unix://") + if found { + err := fileutils.Exists(path) info.Host.RemoteSocket.Exists = err == nil } else { info.Host.RemoteSocket.Exists = true } - return info, err + return info, nil } // SystemPrune removes unused data from the system. Pruning pods, containers, networks, volumes and images.
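The Info fix above drops a redundant url.Parse (whose error was also leaking into the return value) in favor of a prefix check on the already-populated socket path: only unix:// paths are stat'ed, and anything else, such as a tcp socket, is assumed to exist. The same check in isolation; this sketch substitutes os.Stat for the fileutils.Exists helper the vendored code uses:

package sketch

import (
	"os"
	"strings"
)

// remoteSocketExists mirrors the check above: only unix:// paths are
// stat'ed; any other scheme (e.g. tcp) is assumed to exist.
func remoteSocketExists(socket string) bool {
	path, isUnix := strings.CutPrefix(socket, "unix://")
	if !isUnix {
		return true
	}
	_, err := os.Stat(path) // the vendored code calls fileutils.Exists here
	return err == nil
}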
diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/volumes.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/volumes.go index 7ecc29324..77c99559e 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/volumes.go @@ -164,6 +164,9 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL for _, v := range vols { inspectOut, err := v.Inspect() if err != nil { + if errors.Is(err, define.ErrNoSuchVolume) { + continue + } return nil, err } config := entities.VolumeConfigResponse{ diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/containers.go index 6e8c6fea4..89c448673 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/containers.go @@ -10,13 +10,11 @@ import ( "reflect" "strconv" "strings" - "sync" "time" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/podman/v5/libpod/define" - "github.com/containers/podman/v5/libpod/events" "github.com/containers/podman/v5/pkg/api/handlers" "github.com/containers/podman/v5/pkg/bindings" "github.com/containers/podman/v5/pkg/bindings/containers" @@ -647,7 +645,7 @@ func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID s return sessionID, nil } -func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, sigProxy bool, input, output, errput *os.File) error { +func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, sigProxy bool, input, output, errput *os.File) (int, error) { if output == nil && errput == nil { fmt.Printf("%s\n", name) } @@ -671,6 +669,10 @@ func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, sigPro }() // Wait for the attach to actually happen before starting // the container. 
+ + cancelCtx, cancel := context.WithCancel(ic.ClientCtx) + defer cancel() + var code int select { case <-attachReady: startOptions := new(containers.StartOptions) @@ -678,13 +680,21 @@ func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, sigPro startOptions.WithDetachKeys(*dk) } if err := containers.Start(ic.ClientCtx, name, startOptions); err != nil { - return err + return -1, err } + + // call wait immediately after start to avoid racing against container removal when it was created with --rm + exitCode, err := containers.Wait(cancelCtx, name, nil) + if err != nil { + return -1, err + } + code = int(exitCode) + case err := <-attachErr: - return err + return -1, err } // If attachReady happens first, wait for containers.Attach to complete - return <-attachErr + return code, <-attachErr } func logIfRmError(id string, err error, reports []*reports.RmReport) { @@ -742,7 +752,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri } ctrRunning := ctr.State == define.ContainerStateRunning.String() if options.Attach { - err = startAndAttach(ic, name, &options.DetachKeys, options.SigProxy, options.Stdin, options.Stdout, options.Stderr) + code, err := startAndAttach(ic, name, &options.DetachKeys, options.SigProxy, options.Stdin, options.Stdout, options.Stderr) if err == define.ErrDetach { // User manually detached // Exit cleanly immediately @@ -780,19 +790,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri }() } - exitCode, err := containers.Wait(ic.ClientCtx, name, nil) - if err == define.ErrNoSuchCtr { - // Check events - event, err := ic.GetLastContainerEvent(ctx, name, events.Exited) - if err != nil { - logrus.Errorf("Cannot get exit code: %v", err) - report.ExitCode = define.ExecErrorCodeNotFound - } else { - report.ExitCode = *event.ContainerExitCode - } - } else { - report.ExitCode = int(exitCode) - } + report.ExitCode = code reports = append(reports, &report) return reports, nil } @@ -904,7 +902,9 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta return err }) } - if err := startAndAttach(ic, con.ID, &opts.DetachKeys, opts.SigProxy, opts.InputStream, opts.OutputStream, opts.ErrorStream); err != nil { + + code, err := startAndAttach(ic, con.ID, &opts.DetachKeys, opts.SigProxy, opts.InputStream, opts.OutputStream, opts.ErrorStream) + if err != nil { if err == define.ErrDetach { return &report, nil } @@ -932,53 +932,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta }() } - // Wait - exitCode, waitErr := containers.Wait(ic.ClientCtx, con.ID, nil) - if waitErr == nil { - report.ExitCode = int(exitCode) - return &report, nil - } - - // Determine why the wait failed. If the container doesn't exist, - // consult the events. - if !errorhandling.Contains(waitErr, define.ErrNoSuchCtr) { - return &report, waitErr - } - - // Events - eventsChannel := make(chan *events.Event) - eventOptions := entities.EventsOptions{ - EventChan: eventsChannel, - Filter: []string{ - "type=container", - fmt.Sprintf("container=%s", con.ID), - fmt.Sprintf("event=%s", events.Exited), - }, - } - - var lastEvent *events.Event - var mutex sync.Mutex - mutex.Lock() - // Read the events. 
- go func() { - for e := range eventsChannel { - lastEvent = e - } - mutex.Unlock() - }() - - eventsErr := ic.Events(ctx, eventOptions) - - // Wait for all events to be read - mutex.Lock() - if eventsErr != nil || lastEvent == nil { - logrus.Errorf("Cannot get exit code: %v", err) - report.ExitCode = define.ExecErrorCodeNotFound - return &report, nil //nolint: nilerr - } - - report.ExitCode = *lastEvent.ContainerExitCode - return &report, err + report.ExitCode = code + return &report, nil } func (ic *ContainerEngine) Diff(ctx context.Context, namesOrIDs []string, opts entities.DiffOptions) (*entities.DiffReport, error) { diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/events.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/events.go index d2c0e62ef..14af2e854 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/events.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/events.go @@ -24,40 +24,10 @@ func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptio binChan := make(chan entities.Event) go func() { for e := range binChan { - opts.EventChan <- entities.ConvertToLibpodEvent(e) + opts.EventChan <- events.ReadResult{Event: entities.ConvertToLibpodEvent(e)} } close(opts.EventChan) }() options := new(system.EventsOptions).WithFilters(filters).WithSince(opts.Since).WithStream(opts.Stream).WithUntil(opts.Until) return system.Events(ic.ClientCtx, binChan, nil, options) } - -// GetLastContainerEvent takes a container name or ID and an event status and returns -// the last occurrence of the container event. -func (ic *ContainerEngine) GetLastContainerEvent(ctx context.Context, nameOrID string, containerEvent events.Status) (*events.Event, error) { - // check to make sure the event.Status is valid - if _, err := events.StringToStatus(containerEvent.String()); err != nil { - return nil, err - } - var event events.Event - return &event, nil - - /* - FIXME: We need new bindings for this section - filters := []string{ - fmt.Sprintf("container=%s", nameOrID), - fmt.Sprintf("event=%s", containerEvent), - "type=container", - } - - containerEvents, err := system.GetEvents(ctx, entities.EventsOptions{Filter: filters}) - if err != nil { - return nil, err - } - if len(containerEvents) < 1 { - return nil, fmt.Errorf("%s not found: %w", containerEvent.String(), events.ErrEventNotFound) - } - // return the last element in the slice - return containerEvents[len(containerEvents)-1], nil - */ -} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/helpers.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/helpers.go index a60b67e9e..12ebe94f7 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/helpers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/helpers.go @@ -24,6 +24,13 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all, if all && len(namesOrIDs) > 0 { return nil, nil, errors.New("cannot look up containers and all") } + // short cut if not all, not filters and no names are given. This can happen with + // --ignore and --cidfile, https://github.com/containers/podman/issues/23554. 
+ // In this case we have nothing to do and do not even need to make a request + if !all && len(filters) == 0 && len(namesOrIDs) == 0 { + return nil, nil, nil + } + options := new(containers.ListOptions).WithAll(true).WithSync(true).WithFilters(filters) allContainers, err := containers.List(contextWithConnection, options) if err != nil { diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/images.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/images.go index 0fb142b54..3fdce4696 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/images.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/images.go @@ -12,7 +12,6 @@ import ( bdefine "github.com/containers/buildah/define" "github.com/containers/common/libimage/filter" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" "github.com/containers/podman/v5/libpod/define" @@ -102,7 +101,7 @@ func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOption f := strings.Split(filter, "=") filters[f[0]] = f[1:] } - options := new(images.PruneOptions).WithAll(opts.All).WithFilters(filters).WithExternal(opts.External) + options := new(images.PruneOptions).WithAll(opts.All).WithFilters(filters).WithExternal(opts.External).WithBuildCache(opts.BuildCache) reports, err := images.Prune(ir.ClientCtx, options) if err != nil { return nil, err @@ -415,22 +414,22 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie return nil, errors.New("not implemented yet") } -func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) error { +func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, opts entities.ImageScpOptions) (*entities.ImageScpReport, error) { options := new(images.ScpOptions) var destination *string if len(dst) > 1 { destination = &dst } - options.Quiet = &quiet + options.Quiet = &opts.Quiet options.Destination = destination rep, err := images.Scp(ir.ClientCtx, &src, destination, *options) if err != nil { - return err + return nil, err } fmt.Println("Loaded Image(s):", rep.Id) - return nil + return &entities.ImageScpReport{}, nil } diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/manifest.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/manifest.go index 243f4b6b5..49585a198 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/manifest.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/manifest.go @@ -2,11 +2,11 @@ package tunnel import ( "context" - "encoding/json" "fmt" "slices" "strings" + "github.com/containers/common/libimage/define" "github.com/containers/image/v5/types" "github.com/containers/podman/v5/pkg/bindings/images" "github.com/containers/podman/v5/pkg/bindings/manifests" @@ -34,7 +34,7 @@ func (ir *ImageEngine) ManifestExists(ctx context.Context, name string) (*entiti } // ManifestInspect returns contents of manifest list with given name -func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts entities.ManifestInspectOptions) ([]byte, error) { +func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts entities.ManifestInspectOptions) (*define.ManifestListData, error) { options := new(manifests.InspectOptions).WithAuthfile(opts.Authfile) if s := opts.SkipTLSVerify; s !=
types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { @@ -49,11 +49,7 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts en return nil, fmt.Errorf("getting content of manifest list or image %s: %w", name, err) } - buf, err := json.MarshalIndent(list, "", " ") - if err != nil { - return buf, fmt.Errorf("rendering manifest for display: %w", err) - } - return buf, err + return list, err } // ManifestAdd adds images to the manifest list diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/utils/scp.go b/vendor/github.com/containers/podman/v5/pkg/domain/utils/scp.go index 93982be9c..58b20d819 100644 --- a/vendor/github.com/containers/podman/v5/pkg/domain/utils/scp.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/utils/scp.go @@ -17,23 +17,23 @@ import ( "github.com/sirupsen/logrus" ) -func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) (*entities.ImageLoadReport, *entities.ImageScpOptions, *entities.ImageScpOptions, []string, error) { - source := entities.ImageScpOptions{} - dest := entities.ImageScpOptions{} +func ExecuteTransfer(src, dst string, opts entities.ScpExecuteTransferOptions) (*entities.ScpExecuteTransferReport, error) { + source := entities.ScpTransferImageOptions{} + dest := entities.ScpTransferImageOptions{} sshInfo := entities.ImageScpConnections{} - report := entities.ImageLoadReport{Names: []string{}} + loadReport := entities.ScpLoadReport{Names: []string{}} podman, err := os.Executable() if err != nil { - return nil, nil, nil, nil, err + return nil, err } f, err := os.CreateTemp("", "podman") // open temp file for load/save output if err != nil { - return nil, nil, nil, nil, err + return nil, err } - locations := []*entities.ImageScpOptions{} + locations := []*entities.ScpTransferImageOptions{} cliConnections := []string{} args := []string{src} if len(dst) > 0 { @@ -42,7 +42,7 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode for _, arg := range args { loc, connect, err := ParseImageSCPArg(arg) if err != nil { - return nil, nil, nil, nil, err + return nil, err } locations = append(locations, loc) cliConnections = append(cliConnections, connect...) 
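The refactor running through this file replaces ExecuteTransfer's four positional returns with a single ScpExecuteTransferReport, so callers distinguish "the load already ran" (LoadReport set) from "a local transfer is still needed" (Source and Dest set). A hypothetical call-site sketch under that contract; the flag value and the use of ssh.GolangMode are illustrative assumptions, not taken from this diff:

package sketch

import (
	"context"

	"github.com/containers/common/pkg/ssh"
	"github.com/containers/podman/v5/pkg/domain/entities"
	"github.com/containers/podman/v5/pkg/domain/infra/abi"
	domainUtils "github.com/containers/podman/v5/pkg/domain/utils"
)

// scpImage sketches how a caller drives the refactored ExecuteTransfer.
func scpImage(ctx context.Context, src, dst string) error {
	opts := entities.ScpExecuteTransferOptions{
		ParentFlags: []string{"--log-level=error"}, // illustrative flag
		Quiet:       true,
		SSHMode:     ssh.GolangMode, // assumed engine mode
	}
	report, err := domainUtils.ExecuteTransfer(src, dst, opts)
	if err != nil || report.LoadReport != nil {
		return err // the load already ran (or the call failed)
	}
	// Otherwise a local root<->rootless transfer still has to run in ABI mode.
	_, err = abi.Transfer(ctx, *report.Source, *report.Dest,
		entities.ScpTransferOptions{ParentFlags: report.ParentFlags})
	return err
}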
@@ -51,19 +51,19 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode switch { case len(locations) > 1: if err = ValidateSCPArgs(locations); err != nil { - return nil, nil, nil, nil, err + return nil, err } dest = *locations[1] case len(locations) == 1: switch { case len(locations[0].Image) == 0: - return nil, nil, nil, nil, fmt.Errorf("no source image specified: %w", define.ErrInvalidArg) + return nil, fmt.Errorf("no source image specified: %w", define.ErrInvalidArg) case len(locations[0].Image) > 0 && !locations[0].Remote && len(locations[0].User) == 0: // if we have podman image scp $IMAGE - return nil, nil, nil, nil, fmt.Errorf("must specify a destination: %w", define.ErrInvalidArg) + return nil, fmt.Errorf("must specify a destination: %w", define.ErrInvalidArg) } } - source.Quiet = quiet + source.Quiet = opts.Quiet source.File = f.Name() // after parsing the arguments, set the file for the save/load dest.File = source.File defer os.Remove(source.File) @@ -81,59 +81,83 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode cfg, err := config.Default() if err != nil { - return nil, nil, nil, nil, err + return nil, err } err = GetServiceInformation(&sshInfo, cliConnections, cfg) if err != nil { - return nil, nil, nil, nil, err + return nil, err } - saveCmd, loadCmd := CreateCommands(source, dest, parentFlags, podman) + createCommandOpts := entities.ScpCreateCommandsOptions{} + createCommandOpts.ParentFlags = opts.ParentFlags + createCommandOpts.Podman = podman + saveCmd, loadCmd := CreateCommands(source, dest, createCommandOpts) switch { case source.Remote: // if we want to load FROM the remote, dest can either be local or remote in this case - err = SaveToRemote(source.Image, source.File, "", sshInfo.URI[0], sshInfo.Identities[0], sshMode) + saveToRemoteOpts := entities.ScpSaveToRemoteOptions{} + saveToRemoteOpts.Image = source.Image + saveToRemoteOpts.LocalFile = source.File + saveToRemoteOpts.Tag = "" + saveToRemoteOpts.URL = sshInfo.URI[0] + saveToRemoteOpts.Iden = sshInfo.Identities[0] + saveToRemoteOpts.SSHMode = opts.SSHMode + _, err = SaveToRemote(saveToRemoteOpts) if err != nil { - return nil, nil, nil, nil, err + return nil, err } if dest.Remote { // we want to load remote -> remote, both source and dest are remote - rep, id, err := LoadToRemote(dest, dest.File, "", sshInfo.URI[1], sshInfo.Identities[1], sshMode) + loadToRemoteOpts := entities.ScpLoadToRemoteOptions{} + loadToRemoteOpts.Dest = dest + loadToRemoteOpts.LocalFile = dest.File + loadToRemoteOpts.Tag = "" + loadToRemoteOpts.URL = sshInfo.URI[1] + loadToRemoteOpts.Iden = sshInfo.Identities[1] + loadToRemoteOpts.SSHMode = opts.SSHMode + loadToRemoteRep, err := LoadToRemote(loadToRemoteOpts) if err != nil { - return nil, nil, nil, nil, err + return nil, err } - if len(rep) > 0 { - fmt.Println(rep) + if len(loadToRemoteRep.Response) > 0 { + fmt.Println(loadToRemoteRep.Response) } - if len(id) > 0 { - report.Names = append(report.Names, id) + if len(loadToRemoteRep.ID) > 0 { + loadReport.Names = append(loadReport.Names, loadToRemoteRep.ID) } break } id, err := ExecPodman(dest, podman, loadCmd) if err != nil { - return nil, nil, nil, nil, err + return nil, err } if len(id) > 0 { - report.Names = append(report.Names, id) + loadReport.Names = append(loadReport.Names, id) } case dest.Remote: // remote host load, implies source is local _, err = ExecPodman(dest, podman, saveCmd) if err != nil { - return nil, nil, nil, nil, err + return nil, err } - rep, id, err := 
LoadToRemote(dest, source.File, "", sshInfo.URI[0], sshInfo.Identities[0], sshMode) + loadToRemoteOpts := entities.ScpLoadToRemoteOptions{} + loadToRemoteOpts.Dest = dest + loadToRemoteOpts.LocalFile = source.File + loadToRemoteOpts.Tag = "" + loadToRemoteOpts.URL = sshInfo.URI[0] + loadToRemoteOpts.Iden = sshInfo.Identities[0] + loadToRemoteOpts.SSHMode = opts.SSHMode + loadToRemoteRep, err := LoadToRemote(loadToRemoteOpts) if err != nil { - return nil, nil, nil, nil, err + return nil, err } - if len(rep) > 0 { - fmt.Println(rep) + if len(loadToRemoteRep.Response) > 0 { + fmt.Println(loadToRemoteRep.Response) } - if len(id) > 0 { - report.Names = append(report.Names, id) + if len(loadToRemoteRep.ID) > 0 { + loadReport.Names = append(loadReport.Names, loadToRemoteRep.ID) } if err = os.Remove(source.File); err != nil { - return nil, nil, nil, nil, err + return nil, err } default: // else native load, both source and dest are local and transferring between users if source.User == "" { // source user has to be set, destination does not @@ -141,15 +165,21 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode if source.User == "" { u, err := user.Current() if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not obtain user, make sure the environmental variable $USER is set: %w", err) + return nil, fmt.Errorf("could not obtain user, make sure the environment variable $USER is set: %w", err) } source.User = u.Username } } - return nil, &source, &dest, parentFlags, nil // transfer needs to be done in ABI due to cross issues + rep := entities.ScpExecuteTransferReport{} + rep.Source = &source + rep.Dest = &dest + rep.ParentFlags = opts.ParentFlags + return &rep, nil // transfer needs to be done in ABI due to cross issues } - return &report, nil, nil, nil, nil + rep := entities.ScpExecuteTransferReport{} + rep.LoadReport = &loadReport + return &rep, nil } // CreateSCPCommand takes an existing command, appends the given arguments and returns a configured podman command for image scp @@ -162,7 +192,7 @@ func CreateSCPCommand(cmd *exec.Cmd, command []string) *exec.Cmd { } // ScpTag is a helper function for native podman to tag an image after a local load from image SCP -func ScpTag(cmd *exec.Cmd, podman string, dest entities.ImageScpOptions) error { +func ScpTag(cmd *exec.Cmd, podman string, dest entities.ScpTransferImageOptions) error { cmd.Stdout = nil out, err := cmd.Output() // this function captures the output temporarily in order to execute the next command if err != nil { @@ -201,91 +231,88 @@ func LoginUser(user string) (*exec.Cmd, error) { return cmd, err } -// loadToRemote takes image and remote connection information. it connects to the specified client +// LoadToRemote takes image and remote connection information.
It connects to the specified client // and copies the saved image dir over to the remote host and then loads it onto the machine -// returns a string containing output or an error +// returns a report containing ssh response string and the id of the loaded image, or an error -func LoadToRemote(dest entities.ImageScpOptions, localFile string, tag string, url *url.URL, iden string, sshEngine ssh.EngineMode) (string, string, error) { +func LoadToRemote(opts entities.ScpLoadToRemoteOptions) (*entities.ScpLoadToRemoteReport, error) { port := 0 - urlPort := url.Port() + urlPort := opts.URL.Port() if urlPort != "" { var err error - port, err = strconv.Atoi(url.Port()) + port, err = strconv.Atoi(opts.URL.Port()) if err != nil { - return "", "", err + return nil, err } } - input, err := os.Open(localFile) + input, err := os.Open(opts.LocalFile) if err != nil { - return "", "", err + return nil, err } defer input.Close() - out, err := ssh.ExecWithInput(&ssh.ConnectionExecOptions{Host: url.String(), Identity: iden, Port: port, User: url.User, Args: []string{"podman", "image", "load"}}, sshEngine, input) + out, err := ssh.ExecWithInput(&ssh.ConnectionExecOptions{Host: opts.URL.String(), Identity: opts.Iden, Port: port, User: opts.URL.User, Args: []string{"podman", "image", "load"}}, opts.SSHMode, input) if err != nil { - return "", "", err + return nil, err } - if tag != "" { - return "", "", fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) + if opts.Tag != "" { + return nil, fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) } rep := strings.TrimSuffix(out, "\n") outArr := strings.Split(rep, " ") id := outArr[len(outArr)-1] - if len(dest.Tag) > 0 { // tag the remote image using the output ID - _, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.String(), Identity: iden, Port: port, User: url.User, Args: []string{"podman", "image", "tag", id, dest.Tag}}, sshEngine) - if err != nil { - return "", "", err - } + if len(opts.Dest.Tag) > 0 { // tag the remote image using the output ID + _, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: opts.URL.String(), Identity: opts.Iden, Port: port, User: opts.URL.User, Args: []string{"podman", "image", "tag", id, opts.Dest.Tag}}, opts.SSHMode) if err != nil { - return "", "", err + return nil, err } } - return rep, id, nil + return &entities.ScpLoadToRemoteReport{Response: rep, ID: id}, nil } -// saveToRemote takes image information and remote connection information. it connects to the specified client +// SaveToRemote takes image information and remote connection information. It connects to the specified client // and saves the specified image on the remote machine and then copies it to the specified local location // returns an error if one occurs.
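SaveToRemote gets the same options-struct treatment in the hunk that follows. For the LoadToRemote rework above, a hedged sketch of the new call shape (file path, URL, and identity are placeholders; ssh.GolangMode is assumed as in the earlier sketch):

```go
package main // illustrative sketch, not part of the patch

import (
	"fmt"
	"net/url"

	"github.com/containers/common/pkg/ssh"
	"github.com/containers/podman/v5/pkg/domain/entities"
	"github.com/containers/podman/v5/pkg/domain/utils"
)

// loadRemote loads a previously saved image archive onto a remote host.
func loadRemote(dest entities.ScpTransferImageOptions, remote *url.URL, identity string) error {
	rep, err := utils.LoadToRemote(entities.ScpLoadToRemoteOptions{
		Dest:      dest,
		LocalFile: "/tmp/podman-scp.tar", // placeholder archive path
		Tag:       "",                    // renaming is still rejected
		URL:       remote,
		Iden:      identity,
		SSHMode:   ssh.GolangMode, // assumed constant
	})
	if err != nil {
		return err
	}
	fmt.Println(rep.Response) // raw ssh output; rep.ID holds the loaded image ID
	return nil
}
```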
-func SaveToRemote(image, localFile string, tag string, uri *url.URL, iden string, sshEngine ssh.EngineMode) error { - if tag != "" { - return fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) +func SaveToRemote(opts entities.ScpSaveToRemoteOptions) (*entities.ScpSaveToRemoteReport, error) { + if opts.Tag != "" { + return nil, fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) } port := 0 - urlPort := uri.Port() + urlPort := opts.URL.Port() if urlPort != "" { var err error - port, err = strconv.Atoi(uri.Port()) + port, err = strconv.Atoi(opts.URL.Port()) if err != nil { - return err + return nil, err } } - remoteFile, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: uri.String(), Identity: iden, Port: port, User: uri.User, Args: []string{"mktemp"}}, sshEngine) + remoteFile, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: opts.URL.String(), Identity: opts.Iden, Port: port, User: opts.URL.User, Args: []string{"mktemp"}}, opts.SSHMode) if err != nil { - return err + return nil, err } - _, err = ssh.Exec(&ssh.ConnectionExecOptions{Host: uri.String(), Identity: iden, Port: port, User: uri.User, Args: []string{"podman", "image", "save", image, "--format", "oci-archive", "--output", remoteFile}}, sshEngine) + _, err = ssh.Exec(&ssh.ConnectionExecOptions{Host: opts.URL.String(), Identity: opts.Iden, Port: port, User: opts.URL.User, Args: []string{"podman", "image", "save", opts.Image, "--format", "oci-archive", "--output", remoteFile}}, opts.SSHMode) if err != nil { - return err + return nil, err } - opts := ssh.ConnectionScpOptions{User: uri.User, Identity: iden, Port: port, Source: "ssh://" + uri.User.String() + "@" + uri.Hostname() + ":" + remoteFile, Destination: localFile} - scpRep, err := ssh.Scp(&opts, sshEngine) + scpConnOpts := ssh.ConnectionScpOptions{User: opts.URL.User, Identity: opts.Iden, Port: port, Source: "ssh://" + opts.URL.User.String() + "@" + opts.URL.Hostname() + ":" + remoteFile, Destination: opts.LocalFile} + scpRep, err := ssh.Scp(&scpConnOpts, opts.SSHMode) if err != nil { - return err + return nil, err } - _, err = ssh.Exec(&ssh.ConnectionExecOptions{Host: uri.String(), Identity: iden, Port: port, User: uri.User, Args: []string{"rm", scpRep}}, sshEngine) + _, err = ssh.Exec(&ssh.ConnectionExecOptions{Host: opts.URL.String(), Identity: opts.Iden, Port: port, User: opts.URL.User, Args: []string{"rm", scpRep}}, opts.SSHMode) if err != nil { logrus.Errorf("Removing file on endpoint: %v", err) } - return nil + return &entities.ScpSaveToRemoteReport{}, nil } // execPodman executes the podman save/load command given the podman binary -func ExecPodman(dest entities.ImageScpOptions, podman string, command []string) (string, error) { +func ExecPodman(dest entities.ScpTransferImageOptions, podman string, command []string) (string, error) { cmd := exec.Command(podman) CreateSCPCommand(cmd, command[1:]) logrus.Debugf("Executing podman command: %q", cmd) @@ -304,27 +331,27 @@ func ExecPodman(dest entities.ImageScpOptions, podman string, command []string) return "", cmd.Run() } -// createCommands forms the podman save and load commands used by SCP -func CreateCommands(source entities.ImageScpOptions, dest entities.ImageScpOptions, parentFlags []string, podman string) ([]string, []string) { +// CreateCommands forms the podman save and load commands used by SCP +func CreateCommands(source entities.ScpTransferImageOptions, dest entities.ScpTransferImageOptions, opts entities.ScpCreateCommandsOptions) ([]string, 
[]string) { var parentString string quiet := "" if source.Quiet { quiet = "-q " } - if len(parentFlags) > 0 { - parentString = strings.Join(parentFlags, " ") + " " // if there are parent args, an extra space needs to be added + if len(opts.ParentFlags) > 0 { + parentString = strings.Join(opts.ParentFlags, " ") + " " // if there are parent args, an extra space needs to be added } else { - parentString = strings.Join(parentFlags, " ") + parentString = strings.Join(opts.ParentFlags, " ") } - loadCmd := strings.Split(fmt.Sprintf("%s %sload %s--input %s", podman, parentString, quiet, dest.File), " ") - saveCmd := strings.Split(fmt.Sprintf("%s %vsave %s--output %s %s", podman, parentString, quiet, source.File, source.Image), " ") + loadCmd := strings.Split(fmt.Sprintf("%s %sload %s--input %s", opts.Podman, parentString, quiet, dest.File), " ") + saveCmd := strings.Split(fmt.Sprintf("%s %vsave %s--output %s %s", opts.Podman, parentString, quiet, source.File, source.Image), " ") return saveCmd, loadCmd } // parseImageSCPArg returns the valid connection, and source/destination data based on the information provided by the user // arg is a string containing one of the cli arguments; returned is a filled-out source/destination options struct as well as a connections array and an error, if applicable -func ParseImageSCPArg(arg string) (*entities.ImageScpOptions, []string, error) { - location := entities.ImageScpOptions{} +func ParseImageSCPArg(arg string) (*entities.ScpTransferImageOptions, []string, error) { + location := entities.ScpTransferImageOptions{} var err error cliConnections := []string{} @@ -349,7 +376,7 @@ func ParseImageSCPArg(arg string) (*entities.ImageScpOptions, []string, error) { return &location, cliConnections, nil } -func ValidateImagePortion(location entities.ImageScpOptions, arg string) (entities.ImageScpOptions, error) { +func ValidateImagePortion(location entities.ScpTransferImageOptions, arg string) (entities.ScpTransferImageOptions, error) { if RemoteArgLength(arg, 1) > 0 { before := strings.Split(arg, "::")[1] name := ValidateImageName(before) @@ -376,7 +403,7 @@ func ValidateImageName(input string) string { } // validateSCPArgs takes the array of source and destination options and checks for common errors -func ValidateSCPArgs(locations []*entities.ImageScpOptions) error { +func ValidateSCPArgs(locations []*entities.ScpTransferImageOptions) error { if len(locations) > 2 { return fmt.Errorf("cannot specify more than two arguments: %w", define.ErrInvalidArg) } diff --git a/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/types.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/types.go index 9b79298a6..e326f0ee0 100644 --- a/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/types.go @@ -62,6 +62,21 @@ type VolumeSource struct { // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty"` + // image represents a container image pulled and mounted on the host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: podman always attempts to pull the reference. Container creation will fail if the pull fails. + // - Never: podman never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ // - IfNotPresent: podman pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and the pod won't be created. + // The container image gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) with non-executable files (noexec). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +optional + Image *ImageVolumeSource `json:"image,omitempty"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -465,6 +480,24 @@ type EmptyDirVolumeSource struct { SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` } +// ImageVolumeSource represents an image volume resource. +type ImageVolumeSource struct { + // Required: Container image reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Reference string `json:"reference,omitempty"` + + // Policy for pulling OCI objects. Possible values are: + // Always: podman always attempts to pull the reference. Container creation will fail if the pull fails. + // Never: podman never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // IfNotPresent: podman pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + PullPolicy PullPolicy `json:"pullPolicy,omitempty"` +} + // SecretReference represents a Secret Reference. It has enough information to retrieve secret // in any namespace // +structType=atomic @@ -5176,3 +5209,629 @@ type DaemonSetList struct { // A list of daemon sets. Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Job represents the configuration of a single job. +type Job struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of a job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Current status of a job.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// JobList is a collection of jobs. +type JobList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of Jobs. + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CompletionMode specifies how Pod completions of a Job are tracked. +// +enum +type CompletionMode string + +const ( + // NonIndexedCompletion is a Job completion mode. In this mode, the Job is + // considered complete when there have been .spec.completions + // successfully completed Pods. Pod completions are homologous to each other. + NonIndexedCompletion CompletionMode = "NonIndexed" + + // IndexedCompletion is a Job completion mode. In this mode, the Pods of a + // Job get an associated completion index from 0 to (.spec.completions - 1). + // The Job is considered complete when a Pod completes for each completion + // index. + IndexedCompletion CompletionMode = "Indexed" +) + +// PodFailurePolicyAction specifies how a Pod failure is handled. +// +enum +type PodFailurePolicyAction string + +const ( + // This is an action which might be taken on a pod failure - mark the + // pod's job as Failed and terminate all running pods. + PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob" + + // This is an action which might be taken on a pod failure - mark the + // Job's index as failed to avoid restarts within this index. This action + // can only be used when backoffLimitPerIndex is set. + // This value is beta-level. + PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" + + // This is an action which might be taken on a pod failure - the counter towards + // .backoffLimit, represented by the job's .status.failed field, is not + // incremented and a replacement pod is created. + PodFailurePolicyActionIgnore PodFailurePolicyAction = "Ignore" + + // This is an action which might be taken on a pod failure - the pod failure + // is handled in the default way - the counter towards .backoffLimit, + // represented by the job's .status.failed field, is incremented. + PodFailurePolicyActionCount PodFailurePolicyAction = "Count" +) + +// +enum +type PodFailurePolicyOnExitCodesOperator string + +const ( + PodFailurePolicyOnExitCodesOpIn PodFailurePolicyOnExitCodesOperator = "In" + PodFailurePolicyOnExitCodesOpNotIn PodFailurePolicyOnExitCodesOperator = "NotIn" +) + +// PodReplacementPolicy specifies the policy for creating pod replacements. +// +enum +type PodReplacementPolicy string + +const ( + // TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + TerminatingOrFailed PodReplacementPolicy = "TerminatingOrFailed" + // Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + Failed PodReplacementPolicy = "Failed" +) + +// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling +// a failed pod based on its container exit codes. 
In particular, it looks up the +// .state.terminated.exitCode for each app container and init container status, +// represented by the .status.containerStatuses and .status.initContainerStatuses +// fields in the Pod status, respectively. Containers completed with success +// (exit code 0) are excluded from the requirement check. +type PodFailurePolicyOnExitCodesRequirement struct { + // Restricts the check for exit codes to the container with the + // specified name. When null, the rule applies to all containers. + // When specified, it should match one of the container or initContainer + // names in the pod template. + // +optional + ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + + // Represents the relationship between the container exit code(s) and the + // specified values. Containers completed with success (exit code 0) are + // excluded from the requirement check. Possible values are: + // + // - In: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is in the set of specified values. + // - NotIn: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is not in the set of specified values. + // Additional values are considered to be added in the future. Clients should + // react to an unknown operator by assuming the requirement is not satisfied. + Operator PodFailurePolicyOnExitCodesOperator `json:"operator" protobuf:"bytes,2,req,name=operator"` + + // Specifies the set of values. Each returned container exit code (might be + // multiple in case of multiple containers) is checked against this set of + // values with respect to the operator. The list of values must be ordered + // and must not contain duplicates. Value '0' cannot be used for the In operator. + // At least one element is required. At most 255 elements are allowed. + // +listType=set + Values []int32 `json:"values" protobuf:"varint,3,rep,name=values"` +} + +// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching +// an actual pod condition type. +type PodFailurePolicyOnPodConditionsPattern struct { + // Specifies the required Pod condition type. To match a pod condition + // it is required that specified type equals the pod condition type. + Type PodConditionType `json:"type" protobuf:"bytes,1,req,name=type"` + + // Specifies the required Pod condition status. To match a pod condition + // it is required that the specified status equals the pod condition status. + // Defaults to True. + Status ConditionStatus `json:"status" protobuf:"bytes,2,req,name=status"` +} + +// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. +type PodFailurePolicyRule struct { + // Specifies the action taken on a pod failure when the requirements are satisfied. + // Possible values are: + // + // - FailJob: indicates that the pod's job is marked as Failed and all + // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
+ // - Ignore: indicates that the counter towards the .backoffLimit is not + // incremented and a replacement pod is created. + // - Count: indicates that the pod is handled in the default way - the + // counter towards the .backoffLimit is incremented. + // Additional values are considered to be added in the future. Clients should + // react to an unknown action by skipping the rule. + Action PodFailurePolicyAction `json:"action" protobuf:"bytes,1,req,name=action"` + + // Represents the requirement on the container exit codes. + // +optional + OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"` + + // Represents the requirement on the pod conditions. The requirement is represented + // as a list of pod condition patterns. The requirement is satisfied if at + // least one pattern matches an actual pod condition. At most 20 elements are allowed. + // +listType=atomic + // +optional + OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"` +} + +// PodFailurePolicy describes how failed pods influence the backoffLimit. +type PodFailurePolicy struct { + // A list of pod failure policy rules. The rules are evaluated in order. + // Once a rule matches a Pod failure, the remaining rules are ignored. + // When no rule matches the Pod failure, the default handling applies - the + // counter of pod failures is incremented and it is checked against + // the backoffLimit. At most 20 elements are allowed. + // +listType=atomic + Rules []PodFailurePolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"` +} + +// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes. +type SuccessPolicy struct { + // rules represents the list of alternative rules for declaring the Jobs + // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, + // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed. + // The terminal state for such a Job has the "Complete" condition. + // Additionally, these rules are evaluated in order; once the Job meets one of the rules, + // other rules are ignored. At most 20 elements are allowed. + // +listType=atomic + Rules []SuccessPolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"` +} + +// SuccessPolicyRule describes a rule for declaring a Job as succeeded. +// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified. +type SuccessPolicyRule struct { + // succeededIndexes specifies the set of indexes + // which need to be contained in the actual set of the succeeded indexes for the Job. + // The list of indexes must be within 0 to ".spec.completions-1" and + // must not contain duplicates. At least one element is required. + // The indexes are represented as intervals separated by commas. + // The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. + // The numbers are represented by the first and last element of the series, + // separated by a hyphen. + // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // When this field is null, this field doesn't default to any value + // and is never evaluated at any time.
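A quick sketch of these success-policy types in use, assuming the vendored package is imported as v1 (the two rule fields are declared just below):

```go
package main

import v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1"

// successPolicyExample: the Job succeeds once at least 2 of indexes 0-2
// report success; field names match the vendored types in this file.
func successPolicyExample() v1.SuccessPolicy {
	indexes := "0-2"  // interval syntax described above, e.g. "1,3-5,7"
	count := int32(2) // minimal number of succeeded indexes within the set
	return v1.SuccessPolicy{
		Rules: []v1.SuccessPolicyRule{{
			SucceededIndexes: &indexes,
			SucceededCount:   &count,
		}},
	}
}
```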
+ // + // +optional + SucceededIndexes *string `json:"succeededIndexes,omitempty" protobuf:"bytes,1,opt,name=succeededIndexes"` + + // succeededCount specifies the minimal required size of the actual set of the succeeded indexes + // for the Job. When succeededCount is used along with succeededIndexes, the check is + // constrained only to the set of indexes specified by succeededIndexes. + // For example, given that succeededIndexes is "1-4", succeededCount is "3", + // and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded + // because only "1" and "3" indexes are considered in those rules. + // When this field is null, this doesn't default to any value and + // is never evaluated at any time. + // When specified it needs to be a positive integer. + // + // +optional + SucceededCount *int32 `json:"succeededCount,omitempty" protobuf:"varint,2,opt,name=succeededCount"` +} + +// JobSpec describes what the job execution will look like. +type JobSpec struct { + + // Specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` + + // Specifies the desired number of successfully finished pods the + // job should be run with. Setting to null means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` + + // Specifies the duration in seconds relative to the startTime that the job + // may be continuously active before the system tries to terminate it; value + // must be a positive integer. If a Job is suspended (at creation or through an + // update), this timer will effectively be stopped and reset when the Job is + // resumed again. + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + + // Specifies the policy of handling failed pods. In particular, it allows + // specifying the set of actions and conditions which need to be + // satisfied to take the associated action. + // If empty, the default behaviour applies - the counter of failed pods, + // represented by the job's .status.failed field, is incremented and it is + // checked against the backoffLimit. This field cannot be used in combination + // with restartPolicy=OnFailure. + // + // +optional + PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` + + // successPolicy specifies the policy for when the Job can be declared as succeeded. + // If empty, the default behavior applies - the Job is declared as succeeded + // only when the number of succeeded pods equals the completions. + // When the field is specified, it must be immutable and works only for the Indexed Jobs.
+ // Once the Job meets the SuccessPolicy, the lingering pods are terminated. + // + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). + // +optional + SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` + + // Specifies the number of retries before marking this job failed. + // Defaults to 6 + // +optional + BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` + + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. + // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when completions is greater than 10^5. + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` + + // TODO enable it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed + // Optional number of failed pods to retain. + // +optional + // FailedPodsLimit *int32 `json:"failedPodsLimit,omitempty" protobuf:"varint,9,opt,name=failedPodsLimit"` + + // A label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + + // manualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system picks labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, you may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector + // +optional + ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` + + // Describes the pod that will be created when executing a job.
+ // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + Template PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` + + // ttlSecondsAfterFinished limits the lifetime of a Job that has finished + // execution (either Complete or Failed). If this field is set, + // ttlSecondsAfterFinished after the Job finishes, it is eligible to be + // automatically deleted. When the Job is being deleted, its lifecycle + // guarantees (e.g. finalizers) will be honored. If this field is unset, + // the Job won't be automatically deleted. If this field is set to zero, + // the Job becomes eligible to be deleted immediately after it finishes. + // +optional + TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` + + // completionMode specifies how Pod completions are tracked. It can be + // `NonIndexed` (default) or `Indexed`. + // + // `NonIndexed` means that the Job is considered complete when there have + // been .spec.completions successfully completed Pods. Each Pod completion is + // homologous to each other. + // + // `Indexed` means that the Pods of a + // Job get an associated completion index from 0 to (.spec.completions - 1), + // available in the annotation batch.kubernetes.io/job-completion-index. + // The Job is considered complete when there is one successfully completed Pod + // for each index. + // When value is `Indexed`, .spec.completions must be specified and + // `.spec.parallelism` must be less than or equal to 10^5. + // In addition, the Pod name takes the form + // `$(job-name)-$(index)-$(random-string)`, + // the Pod hostname takes the form `$(job-name)-$(index)`. + // + // More completion modes can be added in the future. + // If the Job controller observes a mode that it doesn't recognize, which + // is possible during upgrades due to version skew, the controller + // skips updates for the Job. + // +optional + CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"` + + // suspend specifies whether the Job controller should create Pods or not. If + // a Job is created with suspend set to true, no Pods are created by the Job + // controller. If a Job is suspended after creation (i.e. the flag goes from + // false to true), the Job controller will delete all active Pods associated + // with this Job. Users must design their workload to gracefully handle this. + // Suspending a Job will reset the StartTime field of the Job, effectively + // resetting the ActiveDeadlineSeconds timer too. Defaults to false. + // + // +optional + Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"` + + // podReplacementPolicy specifies when to create replacement Pods. + // Possible values are: + // - TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + // - Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + // + // When using podFailurePolicy, Failed is the only allowed value. + // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + // This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. + // This is on by default.
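For orientation, a minimal Indexed JobSpec assembled from the fields above; this is a sketch assuming the vendored package alias v1 and a populated pod template passed in by the caller:

```go
package main

import v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1"

// indexedJobSpec builds a JobSpec with five completion indexes, two of
// which may run in parallel, retrying each pod up to three times.
func indexedJobSpec(podTemplate v1.PodTemplateSpec) v1.JobSpec {
	completions := int32(5)
	parallelism := int32(2)
	backoff := int32(3)
	mode := v1.IndexedCompletion // declared earlier in this file
	return v1.JobSpec{
		Completions:    &completions,
		Parallelism:    &parallelism,
		BackoffLimit:   &backoff,
		CompletionMode: &mode,
		Template:       podTemplate,
	}
}
```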
+ // +optional + PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` + + // ManagedBy field indicates the controller that manages a Job. The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + // The latest available observations of an object's current state. When a Job + // fails, one of the conditions will have type "Failed" and status true. When + // a Job is suspended, one of the conditions will have type "Suspended" and + // status true; when the Job is resumed, the status of this condition will + // become false. When a Job is completed, one of the conditions will have + // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. + // + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=atomic + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // Represents time when the job controller started processing a job. When a + // Job is created in the suspended state, this field is not set until the + // first time it is resumed. This field is reset every time a Job is resumed + // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. + // + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // Represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + + // The number of pending and running pods which are not terminating (without + // a deletionTimestamp). + // The value is zero for finished jobs. 
+ // +optional + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` + + // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. + // +optional + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` + + // The number of pods which reached phase Failed. + // The value increases monotonically. + // +optional + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` + + // The number of pods which are terminating (in phase Pending or Running + // and have a deletionTimestamp). + // + // This field is beta-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (enabled by default). + // +optional + Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"` + + // completedIndexes holds the completed indexes when .spec.completionMode = + // "Indexed" in a text format. The indexes are represented as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // +optional + CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` + + // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. + // The indexes are represented in the text format analogous to the + // `completedIndexes` field, i.e. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` + + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but + // the job controller hasn't yet accounted for in the status counters. + // + // The job controller creates pods with a finalizer. When a pod terminates + // (succeeded or failed), the controller does three steps to account for it + // in the job status: + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding + // counter. + // + // Old jobs might not be tracked using this field, in which case the field + // remains null. + // The structure is empty for finished jobs. + // +optional + UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` + + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp).
+ Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` +} + +// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't +// been accounted in Job status counters. +type UncountedTerminatedPods struct { + // succeeded holds UIDs of succeeded Pods. + // +listType=set + // +optional + Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` + + // failed holds UIDs of failed Pods. + // +listType=set + // +optional + Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +type JobConditionType string + +// These are built-in conditions of a job. +const ( + // JobSuspended means the job has been suspended. + JobSuspended JobConditionType = "Suspended" + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. + JobFailed JobConditionType = "Failed" + // FailureTarget means the job is about to fail its execution. + JobFailureTarget JobConditionType = "FailureTarget" + // JobSuccessCriteriaMet means the Job has succeeded. + JobSuccessCriteriaMet JobConditionType = "SuccessCriteriaMet" +) + +const ( + // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to + // a failed pod matching a pod failure policy rule + // https://kep.k8s.io/3329 + JobReasonPodFailurePolicy string = "PodFailurePolicy" + // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of + // times higher than backOffLimit times. + JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded" + // JobReasonDeadlineExceeded means job duration is past ActiveDeadline + JobReasonDeadlineExceeded string = "DeadlineExceeded" + // JobReasonMaxFailedIndexesExceeded indicates that an index of a job failed + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded" + // JobReasonFailedIndexes means Job has failed indexes. + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonFailedIndexes string = "FailedIndexes" + // JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to + // a Job meeting the successPolicy. + // https://kep.k8s.io/3998 + // This is currently a beta field. + JobReasonSuccessPolicy string = "SuccessPolicy" + // JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to + // the number of succeeded Job pods meeting completions. + // - https://kep.k8s.io/3998 + // This is currently a beta field. + JobReasonCompletionsReached string = "CompletionsReached" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition was checked. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another.
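Given the condition types declared above, the conventional terminal-state check looks like this sketch (assuming the vendored package alias v1; ConditionTrue is assumed to be the usual core-v1 ConditionStatus constant in the same package):

```go
package main

import v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1"

// jobFinished reports whether a Job reached a terminal condition:
// Complete or Failed with status True.
func jobFinished(status v1.JobStatus) bool {
	for _, c := range status.Conditions {
		if (c.Type == v1.JobComplete || c.Type == v1.JobFailed) && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}
```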
+ // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} diff --git a/vendor/github.com/containers/podman/v5/pkg/ps/ps.go b/vendor/github.com/containers/podman/v5/pkg/ps/ps.go index 755e04c08..7a77621e7 100644 --- a/vendor/github.com/containers/podman/v5/pkg/ps/ps.go +++ b/vendor/github.com/containers/podman/v5/pkg/ps/ps.go @@ -122,6 +122,9 @@ func GetExternalContainerLists(runtime *libpod.Runtime) ([]entities.ListContaine switch { case errors.Is(err, types.ErrLoadError): continue + // Container could have been removed since listing + case errors.Is(err, types.ErrContainerUnknown): + continue case err != nil: return nil, err default: diff --git a/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.go b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.go index 3ad390624..9ecc5df9e 100644 --- a/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.go @@ -20,9 +20,9 @@ import ( "github.com/containers/storage/pkg/idtools" pmount "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/unshare" + "github.com/moby/sys/capability" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "github.com/syndtr/gocapability/capability" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container.go index f224453da..ef9aed24a 100644 --- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container.go @@ -444,6 +444,10 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, containerID } } + specg.HealthLogDestination = conf.HealthLogDestination + specg.HealthMaxLogCount = conf.HealthMaxLogCount + specg.HealthMaxLogSize = conf.HealthMaxLogSize + specg.IDMappings = &conf.IDMappings specg.ContainerCreateCommand = conf.CreateCommand if len(specg.Rootfs) == 0 { diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container_create.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container_create.go index 8d4029114..ca7721894 100644 --- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container_create.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container_create.go @@ -642,6 +642,10 @@ func createContainerOptions(rt 
*libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithHealthCheckOnFailureAction(s.ContainerHealthCheckConfig.HealthCheckOnFailureAction)) } + options = append(options, libpod.WithHealthCheckLogDestination(s.ContainerHealthCheckConfig.HealthLogDestination)) + options = append(options, libpod.WithHealthCheckMaxLogCount(s.ContainerHealthCheckConfig.HealthMaxLogCount)) + options = append(options, libpod.WithHealthCheckMaxLogSize(s.ContainerHealthCheckConfig.HealthMaxLogSize)) + if s.SdNotifyMode == define.SdNotifyModeHealthy && !healthCheckSet { return nil, fmt.Errorf("%w: sdnotify policy %q requires a healthcheck to be set", define.ErrInvalidArg, s.SdNotifyMode) } diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/kube.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/kube.go index 1328323ce..b279f7a68 100644 --- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/kube.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/kube.go @@ -438,6 +438,9 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener } s.Annotations[define.KubeHealthCheckAnnotation] = "true" + s.HealthLogDestination = define.DefaultHealthCheckLocalDestination + s.HealthMaxLogCount = define.DefaultHealthMaxLogCount + s.HealthMaxLogSize = define.DefaultHealthMaxLogSize // Environment Variables envs := map[string]string{} @@ -567,6 +570,14 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener Source: define.TypeTmpfs, } s.Mounts = append(s.Mounts, memVolume) + case KubeVolumeTypeImage: + imageVolume := specgen.ImageVolume{ + Destination: volume.MountPath, + ReadWrite: false, + Source: volumeSource.Source, + SubPath: volume.SubPath, + } + s.ImageVolumes = append(s.ImageVolumes, &imageVolume) default: return nil, errors.New("unsupported volume source type") } diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/volume.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/volume.go index f17ba962a..0ea7bb720 100644 --- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/volume.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/volume.go @@ -37,13 +37,14 @@ const ( KubeVolumeTypeSecret KubeVolumeTypeEmptyDir KubeVolumeTypeEmptyDirTmpfs + KubeVolumeTypeImage ) //nolint:revive type KubeVolume struct { // Type of volume to create Type KubeVolumeType - // Path for bind mount or volume name for named volume + // Path for bind mount, volume name for named volume, or image name for image volume Source string // Items to add to a named volume created where the key is the file name and the value is the data // This is only used when there are volumes in the yaml that refer to a configmap @@ -56,6 +57,8 @@ type KubeVolume struct { // DefaultMode sets the permissions on files created for the volume // This is optional and defaults to 0644 DefaultMode int32 + // Used for volumes of type Image. Ignored for other volume types.
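On the kube side, the new volume type is wired through VolumeFromSource (the ImagePullPolicy field is declared just below, and VolumeFromImage appears in the following hunk). A hedged sketch of the dispatch, with a hypothetical image reference and v1.PullIfNotPresent assumed as the usual vendored pull-policy constant:

```go
package main

import (
	"fmt"

	v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1"
	"github.com/containers/podman/v5/pkg/specgen/generate/kube"
)

// imageVolumeExample resolves an image-backed VolumeSource into a KubeVolume
// of type KubeVolumeTypeImage; nil configmaps/secrets suffice for this path.
func imageVolumeExample() error {
	src := v1.VolumeSource{
		Image: &v1.ImageVolumeSource{
			Reference:  "quay.io/example/data:latest", // hypothetical image
			PullPolicy: v1.PullIfNotPresent,           // assumed constant
		},
	}
	kv, err := kube.VolumeFromSource(src, nil, nil, "model-data", "")
	if err != nil {
		return err
	}
	fmt.Println(kv.Type == kube.KubeVolumeTypeImage, kv.Source)
	return nil
}
```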
+ ImagePullPolicy v1.PullPolicy } // Create a KubeVolume from an HostPathVolumeSource @@ -279,6 +282,14 @@ func VolumeFromEmptyDir(emptyDirVolumeSource *v1.EmptyDirVolumeSource, name stri } } +func VolumeFromImage(imageVolumeSource *v1.ImageVolumeSource, name string) (*KubeVolume, error) { + return &KubeVolume{ + Type: KubeVolumeTypeImage, + Source: imageVolumeSource.Reference, + ImagePullPolicy: imageVolumeSource.PullPolicy, + }, nil +} + // Create a KubeVolume from one of the supported VolumeSource func VolumeFromSource(volumeSource v1.VolumeSource, configMaps []v1.ConfigMap, secretsManager *secrets.SecretsManager, volName, mountLabel string) (*KubeVolume, error) { switch { @@ -292,6 +303,8 @@ func VolumeFromSource(volumeSource v1.VolumeSource, configMaps []v1.ConfigMap, s return VolumeFromSecret(volumeSource.Secret, secretsManager) case volumeSource.EmptyDir != nil: return VolumeFromEmptyDir(volumeSource.EmptyDir, volName) + case volumeSource.Image != nil: + return VolumeFromImage(volumeSource.Image, volName) default: return nil, errors.New("HostPath, ConfigMap, EmptyDir, Secret, and PersistentVolumeClaim are currently the only supported VolumeSource") } diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go index 8b8338dc0..5b0e90415 100644 --- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go @@ -297,6 +297,13 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod. postConfigureNetNS := needPostConfigureNetNS(s) + // Network + portMappings, expose, err := createPortMappings(s, imageData) + if err != nil { + return nil, err + } + toReturn = append(toReturn, libpod.WithExposedPorts(expose)) + switch s.NetNS.NSMode { case specgen.FromPod: if pod == nil || infraCtr == nil { @@ -316,28 +323,15 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod. toReturn = append(toReturn, libpod.WithNetNSFrom(netCtr)) } case specgen.Slirp: - portMappings, expose, err := createPortMappings(s, imageData) - if err != nil { - return nil, err - } val := "slirp4netns" if s.NetNS.Value != "" { val = fmt.Sprintf("slirp4netns:%s", s.NetNS.Value) } - toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, val, nil)) + toReturn = append(toReturn, libpod.WithNetNS(portMappings, postConfigureNetNS, val, nil)) case specgen.Pasta: - portMappings, expose, err := createPortMappings(s, imageData) - if err != nil { - return nil, err - } val := "pasta" - toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, val, nil)) + toReturn = append(toReturn, libpod.WithNetNS(portMappings, postConfigureNetNS, val, nil)) case specgen.Bridge, specgen.Private, specgen.Default: - portMappings, expose, err := createPortMappings(s, imageData) - if err != nil { - return nil, err - } - rtConfig, err := rt.GetConfigNoCopy() if err != nil { return nil, err @@ -364,7 +358,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod. 
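The hunks above wire Kubernetes `image` volume sources into podman: VolumeFromSource now dispatches to the new VolumeFromImage, which records the image reference and pull policy on the KubeVolume, and ToSpecGen turns it into a read-only specgen.ImageVolume. A minimal sketch of the new path, assuming podman's vendored core/v1 types; the image name and volume name are illustrative:

	// Sketch: build a KubeVolume from a Kubernetes image volume source,
	// exercising the new VolumeFromSource case above. Values illustrative.
	src := v1.VolumeSource{
		Image: &v1.ImageVolumeSource{
			Reference:  "quay.io/example/config-data:latest", // hypothetical image
			PullPolicy: v1.PullIfNotPresent,
		},
	}
	vol, err := VolumeFromSource(src, nil, nil, "config-data", "")
	if err != nil {
		return nil, err
	}
	// vol.Type == KubeVolumeTypeImage; vol.Source carries the image reference.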
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go
index 8b8338dc0..5b0e90415 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go
@@ -297,6 +297,13 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
 
 	postConfigureNetNS := needPostConfigureNetNS(s)
 
+	// Network
+	portMappings, expose, err := createPortMappings(s, imageData)
+	if err != nil {
+		return nil, err
+	}
+	toReturn = append(toReturn, libpod.WithExposedPorts(expose))
+
 	switch s.NetNS.NSMode {
 	case specgen.FromPod:
 		if pod == nil || infraCtr == nil {
@@ -316,28 +323,15 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
 		toReturn = append(toReturn, libpod.WithNetNSFrom(netCtr))
 	}
 	case specgen.Slirp:
-		portMappings, expose, err := createPortMappings(s, imageData)
-		if err != nil {
-			return nil, err
-		}
 		val := "slirp4netns"
 		if s.NetNS.Value != "" {
 			val = fmt.Sprintf("slirp4netns:%s", s.NetNS.Value)
 		}
-		toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, val, nil))
+		toReturn = append(toReturn, libpod.WithNetNS(portMappings, postConfigureNetNS, val, nil))
 	case specgen.Pasta:
-		portMappings, expose, err := createPortMappings(s, imageData)
-		if err != nil {
-			return nil, err
-		}
 		val := "pasta"
-		toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, val, nil))
+		toReturn = append(toReturn, libpod.WithNetNS(portMappings, postConfigureNetNS, val, nil))
 	case specgen.Bridge, specgen.Private, specgen.Default:
-		portMappings, expose, err := createPortMappings(s, imageData)
-		if err != nil {
-			return nil, err
-		}
-
 		rtConfig, err := rt.GetConfigNoCopy()
 		if err != nil {
 			return nil, err
@@ -364,7 +358,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
 			s.Networks[rtConfig.Network.DefaultNetwork] = opts
 			delete(s.Networks, "default")
 		}
-		toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, "bridge", s.Networks))
+		toReturn = append(toReturn, libpod.WithNetNS(portMappings, postConfigureNetNS, "bridge", s.Networks))
 	}
 
 	if s.UseImageHosts != nil && *s.UseImageHosts {
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci_linux.go
index d6247bbf6..d19cf9c03 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci_linux.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci_linux.go
@@ -318,6 +318,10 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
 		configSpec.Annotations[define.InspectAnnotationAutoremove] = define.InspectResponseTrue
 	}
 
+	if s.RemoveImage != nil && *s.RemoveImage {
+		configSpec.Annotations[define.InspectAnnotationAutoremoveImage] = define.InspectResponseTrue
+	}
+
 	if len(s.VolumesFrom) > 0 {
 		configSpec.Annotations[define.VolumesFromAnnotation] = strings.Join(s.VolumesFrom, ";")
 	}
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pod_create.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pod_create.go
index c4c8dc451..9b01a747e 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pod_create.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pod_create.go
@@ -85,6 +85,12 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (_ *libpod.Pod, finalErr e
 	// make sure of that here.
 	p.PodSpecGen.InfraContainerSpec.ResourceLimits = nil
 	p.PodSpecGen.InfraContainerSpec.WeightDevice = nil
+
+	// Set default for HealthCheck
+	p.PodSpecGen.InfraContainerSpec.HealthLogDestination = define.DefaultHealthCheckLocalDestination
+	p.PodSpecGen.InfraContainerSpec.HealthMaxLogCount = define.DefaultHealthMaxLogCount
+	p.PodSpecGen.InfraContainerSpec.HealthMaxLogSize = define.DefaultHealthMaxLogSize
+
 	rtSpec, spec, opts, err := MakeContainer(context.Background(), rt, p.PodSpecGen.InfraContainerSpec, false, nil)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/ports.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/ports.go
index a67419d86..0ff3eadbe 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/ports.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/ports.go
@@ -212,7 +212,6 @@ func ParsePortMapping(portMappings []types.PortMapping, exposePorts map[uint16][
 
 	for hostIP, protoMap := range portMap {
 		for protocol, ports := range protoMap {
-			ports := ports
 			if len(ports) == 0 {
 				continue
 			}
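The deleted `ports := ports` line above is the old per-iteration copy idiom; with `go 1.22.6` in go.mod, each loop iteration already gets a fresh variable, so the copy is dead code. A self-contained illustration of the semantics the module now relies on:

	package main

	import "fmt"

	func main() {
		var printers []func()
		for _, p := range []int{80, 443} {
			printers = append(printers, func() { fmt.Println(p) })
		}
		for _, f := range printers {
			// Go 1.22 modules: prints 80 then 443. Before 1.22, without a
			// `p := p` copy, both closures would have printed 443.
			f()
		}
	}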
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/namespaces.go b/vendor/github.com/containers/podman/v5/pkg/specgen/namespaces.go
index e5b99f89d..13376b3ba 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgen/namespaces.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/namespaces.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/pkg/cgroups"
+	"github.com/containers/common/pkg/config"
 	"github.com/containers/podman/v5/libpod/define"
 	"github.com/containers/podman/v5/pkg/namespaces"
 	"github.com/containers/podman/v5/pkg/util"
@@ -333,14 +334,18 @@ func ParseUserNamespace(ns string) (Namespace, error) {
 // If the input is nil or empty it will use the default setting from containers.conf
 func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) {
 	var networkOptions map[string][]string
+	toReturn := Namespace{}
 	// by default we try to use the containers.conf setting
 	// if we get at least one value use this instead
-	ns := containerConfig.Containers.NetNS
+	cfg, err := config.Default()
+	if err != nil {
+		return toReturn, nil, nil, err
+	}
+	ns := cfg.Containers.NetNS
 	if len(networks) > 0 {
 		ns = networks[0]
 	}
 
-	toReturn := Namespace{}
 	podmanNetworks := make(map[string]types.PerNetworkOptions)
 
 	switch {
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/pod_validate.go b/vendor/github.com/containers/podman/v5/pkg/specgen/pod_validate.go
index 2f1c874ae..551f9eb18 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgen/pod_validate.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/pod_validate.go
@@ -3,15 +3,12 @@ package specgen
 import (
 	"errors"
 	"fmt"
-
-	"github.com/containers/podman/v5/pkg/util"
 )
 
 var (
 	// ErrInvalidPodSpecConfig describes an error given when the podspecgenerator is invalid
 	ErrInvalidPodSpecConfig = errors.New("invalid pod spec")
 	// containerConfig has the default configurations defined in containers.conf
-	containerConfig = util.DefaultContainerConfig()
 )
 
 func exclusivePodOptions(opt1, opt2 string) error {
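With the package-level containerConfig gone, ParseNetworkFlag now loads the containers.conf default lazily through config.Default() and can surface its error to the caller instead of exiting at init time. A sketch of a call site, assuming "bridge" is an accepted flag value:

	// Sketch: an empty slice falls back to the containers.conf netns default,
	// now resolved via config.Default() instead of a cached package variable.
	ns, nets, driverOpts, err := specgen.ParseNetworkFlag([]string{"bridge"})
	if err != nil {
		return err // config.Default() failures now surface here
	}
	_ = driverOpts // non-podman network driver options, if any
	fmt.Printf("mode=%s networks=%d\n", ns.NSMode, len(nets))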
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/specgen.go b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen.go
index 00a208921..eca50d6cf 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgen/specgen.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen.go
@@ -159,6 +159,12 @@ type ContainerBasicConfig struct {
 	// and exits.
 	// Optional.
 	Remove *bool `json:"remove,omitempty"`
+	// RemoveImage indicates that the container should remove the image it
+	// was created from after it exits.
+	// Only allowed if Remove is set to true and Image, not Rootfs, is in
+	// use.
+	// Optional.
+	RemoveImage *bool `json:"removeImage,omitempty"`
 	// ContainerCreateCommand is the command that was used to create this
 	// container.
 	// This will be shown in the output of Inspect() on the container, and
@@ -207,11 +213,11 @@ type ContainerBasicConfig struct {
 	// container.
 	// Optional.
 	EnvMerge []string `json:"envmerge,omitempty"`
-	// UnsetEnv unsets the specified default environment variables from the image or from buildin or containers.conf
+	// UnsetEnv unsets the specified default environment variables from the image or from built-in or containers.conf
 	// Optional.
 	UnsetEnv []string `json:"unsetenv,omitempty"`
-	// UnsetEnvAll unsetall default environment variables from the image or from buildin or containers.conf
-	// UnsetEnvAll unsets all default environment variables from the image or from buildin
+	// UnsetEnvAll unsetall default environment variables from the image or from built-in or containers.conf
+	// UnsetEnvAll unsets all default environment variables from the image or from built-in
 	// Optional.
 	UnsetEnvAll *bool `json:"unsetenvall,omitempty"`
 	// Passwd is a container run option that determines if we are validating users/groups before running the container
@@ -593,6 +599,14 @@ type ContainerHealthCheckConfig struct {
 	// Requires that HealthConfig be set.
 	// Optional.
 	StartupHealthConfig *define.StartupHealthCheck `json:"startupHealthConfig,omitempty"`
+	// HealthLogDestination defines the destination where the log is stored
+	HealthLogDestination string `json:"healthLogDestination,omitempty"`
+	// HealthMaxLogCount is maximum number of attempts in the HealthCheck log file.
+	// ('0' value means an infinite number of attempts in the log file)
+	HealthMaxLogCount uint `json:"healthMaxLogCount,omitempty"`
+	// HealthMaxLogSize is the maximum length in characters of stored HealthCheck log
+	// ("0" value means an infinite log length)
+	HealthMaxLogSize uint `json:"healthMaxLogSize,omitempty"`
 }
 
 // SpecGenerator creates an OCI spec and Libpod configuration options to create
@@ -665,13 +679,25 @@ func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator {
 	}
 	return &SpecGenerator{
 		ContainerStorageConfig: csc,
+		ContainerHealthCheckConfig: ContainerHealthCheckConfig{
+			HealthLogDestination: define.DefaultHealthCheckLocalDestination,
+			HealthMaxLogCount:    define.DefaultHealthMaxLogCount,
+			HealthMaxLogSize:     define.DefaultHealthMaxLogSize,
+		},
 	}
 }
 
 // NewSpecGenerator returns a SpecGenerator struct given one of two mandatory inputs
 func NewSpecGeneratorWithRootfs(rootfs string) *SpecGenerator {
 	csc := ContainerStorageConfig{Rootfs: rootfs}
-	return &SpecGenerator{ContainerStorageConfig: csc}
+	return &SpecGenerator{
+		ContainerStorageConfig: csc,
+		ContainerHealthCheckConfig: ContainerHealthCheckConfig{
+			HealthLogDestination: define.DefaultHealthCheckLocalDestination,
+			HealthMaxLogCount:    define.DefaultHealthMaxLogCount,
+			HealthMaxLogSize:     define.DefaultHealthMaxLogSize,
+		},
+	}
 }
 
 func StringSlicesEqual(a, b []string) bool {
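Both constructors above now seed the three new health-check log fields, so a freshly built SpecGenerator carries sane rotation defaults. A sketch of overriding them after construction; the values are illustrative, and whether a given destination string is accepted is validated elsewhere in podman:

	s := specgen.NewSpecGenerator("alpine:latest", false)
	// Defaults come from define.DefaultHealthCheckLocalDestination,
	// define.DefaultHealthMaxLogCount, and define.DefaultHealthMaxLogSize.
	s.HealthLogDestination = "/var/log/healthchecks" // hypothetical directory destination
	s.HealthMaxLogCount = 10                         // keep the last 10 attempts ('0' = unlimited)
	s.HealthMaxLogSize = 1024                        // cap each stored log at 1024 characters ("0" = unlimited)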
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgenutil/specgen.go b/vendor/github.com/containers/podman/v5/pkg/specgenutil/specgen.go
index c9dc0775d..0bc9b419a 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgenutil/specgen.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgenutil/specgen.go
@@ -370,6 +370,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
 	}
 	s.HealthCheckOnFailureAction = onFailureAction
 
+	s.HealthLogDestination = c.HealthLogDestination
+
+	s.HealthMaxLogCount = c.HealthMaxLogCount
+
+	s.HealthMaxLogSize = c.HealthMaxLogSize
+
 	if c.StartupHCCmd != "" {
 		if c.NoHealthCheck {
 			return errors.New("cannot specify both --no-healthcheck and --health-startup-cmd")
@@ -516,6 +522,10 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
 	if len(s.Annotations) == 0 {
 		s.Annotations = annotations
 	}
+	// Add the user namespace configuration to the annotations
+	if c.UserNS != "" {
+		s.Annotations[define.UserNsAnnotation] = c.UserNS
+	}
 
 	if len(c.StorageOpts) > 0 {
 		opts := make(map[string]string, len(c.StorageOpts))
diff --git a/vendor/github.com/containers/podman/v5/pkg/specgenutil/util.go b/vendor/github.com/containers/podman/v5/pkg/specgenutil/util.go
index b5eb5d914..222e50f93 100644
--- a/vendor/github.com/containers/podman/v5/pkg/specgenutil/util.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgenutil/util.go
@@ -261,7 +261,7 @@ func parseAndValidatePort(port string) (uint16, error) {
 	return uint16(num), nil
 }
 
-func CreateExitCommandArgs(storageConfig storageTypes.StoreOptions, config *config.Config, syslog, rm, exec bool) ([]string, error) {
+func CreateExitCommandArgs(storageConfig storageTypes.StoreOptions, config *config.Config, syslog, rm, rmi, exec bool) ([]string, error) {
 	// We need a cleanup process for containers in the current model.
 	// But we can't assume that the caller is Podman - it could be another
 	// user of the API.
@@ -311,12 +311,18 @@ func CreateExitCommandArgs(storageConfig storageTypes.StoreOptions, config *conf
 		command = append(command, "--module", module)
 	}
 
-	command = append(command, []string{"container", "cleanup"}...)
+	// --stopped-only is used to ensure we only cleanup stopped containers and do not race
+	// against other processes that did a cleanup() + init() again before we had the chance to run
+	command = append(command, []string{"container", "cleanup", "--stopped-only"}...)
 
 	if rm {
 		command = append(command, "--rm")
 	}
 
+	if rmi {
+		command = append(command, "--rmi")
+	}
+
 	// This has to be absolutely last, to ensure that the exec session ID
 	// will be added after it by Libpod.
 	if exec {
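CreateExitCommandArgs grew an `rmi` flag to mirror the new RemoveImage behavior, and every generated cleanup command now carries --stopped-only. A sketch of the call and the resulting argv tail, assuming storeOpts and cfg were obtained from the usual storage and containers.conf helpers:

	args, err := specgenutil.CreateExitCommandArgs(storeOpts, cfg,
		false, // syslog
		true,  // rm: remove the container on exit
		true,  // rmi: also remove the image it was created from
		false, // exec
	)
	if err != nil {
		return err
	}
	// args now ends with: "container", "cleanup", "--stopped-only", "--rm", "--rmi"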
diff --git a/vendor/github.com/containers/podman/v5/pkg/util/utils.go b/vendor/github.com/containers/podman/v5/pkg/util/utils.go
index 602d12054..ddfda73d1 100644
--- a/vendor/github.com/containers/podman/v5/pkg/util/utils.go
+++ b/vendor/github.com/containers/podman/v5/pkg/util/utils.go
@@ -17,7 +17,6 @@ import (
 	"time"
 
 	"github.com/BurntSushi/toml"
-	"github.com/containers/common/pkg/config"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/podman/v5/libpod/define"
 	"github.com/containers/podman/v5/pkg/errorhandling"
@@ -43,17 +42,6 @@ type idMapFlags struct {
 	GroupMap bool // The "g" flag
 }
 
-var containerConfig *config.Config
-
-func init() {
-	var err error
-	containerConfig, err = config.Default()
-	if err != nil {
-		logrus.Error(err)
-		os.Exit(1)
-	}
-}
-
 // Helper function to determine the username/password passed
 // in the creds string. It could be either or both.
 func parseCreds(creds string) (string, string) {
@@ -84,7 +72,7 @@ func ParseDockerignore(containerfiles []string, root string) ([]string, string,
 		// does not attempts to re-resolve it
 		ignoreFile = path
 		ignore, dockerIgnoreErr = os.ReadFile(path)
-		if os.IsNotExist(dockerIgnoreErr) {
+		if errors.Is(dockerIgnoreErr, fs.ErrNotExist) {
 			// In this case either ignorefile was not found
 			// or it is a symlink to unexpected file in such
 			// case manually set ignorefile to `/dev/null` so
@@ -1225,10 +1213,6 @@ func ValidateSysctls(strSlice []string) (map[string]string, error) {
 	return sysctl, nil
 }
 
-func DefaultContainerConfig() *config.Config {
-	return containerConfig
-}
-
 func CreateIDFile(path string, id string) error {
 	idFile, err := os.Create(path)
 	if err != nil {
diff --git a/vendor/github.com/containers/podman/v5/pkg/util/utils_linux.go b/vendor/github.com/containers/podman/v5/pkg/util/utils_linux.go
index 85d819b9b..9130d1f40 100644
--- a/vendor/github.com/containers/podman/v5/pkg/util/utils_linux.go
+++ b/vendor/github.com/containers/podman/v5/pkg/util/utils_linux.go
@@ -191,6 +191,9 @@ func getDevices(path string) ([]spec.LinuxDevice, error) {
 		default:
 			sub, err := getDevices(filepath.Join(path, f.Name()))
 			if err != nil {
+				if errors.Is(err, fs.ErrNotExist) {
+					continue
+				}
 				return nil, err
 			}
 			if sub != nil {
@@ -209,7 +212,7 @@ func getDevices(path string) ([]spec.LinuxDevice, error) {
 			if err == errNotADevice {
 				continue
 			}
-			if os.IsNotExist(err) {
+			if errors.Is(err, fs.ErrNotExist) {
 				continue
 			}
 			return nil, err
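The switch from os.IsNotExist to errors.Is(err, fs.ErrNotExist) in the two files above matters once errors are wrapped: only errors.Is walks the %w chain. A self-contained demonstration:

	package main

	import (
		"errors"
		"fmt"
		"io/fs"
		"os"
	)

	func main() {
		_, err := os.ReadFile("/no/such/file")
		wrapped := fmt.Errorf("reading ignore file: %w", err)

		fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap
		fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: follows the %w chain
	}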
diff --git a/vendor/github.com/containers/podman/v5/version/rawversion/version.go b/vendor/github.com/containers/podman/v5/version/rawversion/version.go
index 3df0426b6..3fd7b6154 100644
--- a/vendor/github.com/containers/podman/v5/version/rawversion/version.go
+++ b/vendor/github.com/containers/podman/v5/version/rawversion/version.go
@@ -7,4 +7,4 @@ package rawversion
 //
 // NOTE: remember to bump the version at the top of the top-level README.md
 // file when this is bumped.
-const RawVersion = "5.2.5"
+const RawVersion = "5.3.0"
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 49a6e33b7..0fed08c3b 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -23,7 +23,7 @@ env:
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20240529t141726z-f40f39d13"
+    IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -120,7 +120,7 @@ lint_task:
     env:
         CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
     container:
-        image: golang:1.21
+        image: golang
     modules_cache:
         fingerprint_script: cat go.sum
        folder: $GOPATH/pkg/mod
@@ -167,13 +167,26 @@ vendor_task:
    build_script: make vendor
    test_script: hack/tree_status.sh
-
cross_task:
    alias: cross
    container:
-        image: golang:1.21
+        image: golang:1.22
    build_script: make cross

+gofix_task:
+    alias: gofix
+    container:
+        image: golang:1.22
+    build_script: go fix ./...
+    test_script: git diff --exit-code
+
+codespell_task:
+    alias: codespell
+    container:
+        image: python
+    build_script: pip install codespell
+    test_script: codespell
+

# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
@@ -190,6 +203,8 @@ success_task:
        - meta
        - vendor
        - cross
+        - gofix
+        - codespell
    container:
        image: golang:1.21
    clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
diff --git a/vendor/github.com/containers/storage/.codespellrc b/vendor/github.com/containers/storage/.codespellrc
new file mode 100644
index 000000000..2af969196
--- /dev/null
+++ b/vendor/github.com/containers/storage/.codespellrc
@@ -0,0 +1,3 @@
+[codespell]
+skip = ./.git,./vendor,./tests/tools/vendor,AUTHORS
+ignore-words-list = afile,flate,prevend,Plack,worl
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 96ea9f9a5..64c01b67f 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -32,6 +32,11 @@ BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
 GO ?= go
 TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
 
+# N/B: This value is managed by Renovate, manual changes are
+# possible, as long as they don't disturb the formatting
+# (i.e. DO NOT ADD A 'v' prefix!)
+GOLANGCI_LINT_VERSION := 1.61.0
+
 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
 
 clean: ## remove all built files
@@ -41,7 +46,7 @@ containers-storage: ## build using gc on the host
 	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
 
 codespell:
-	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
+	codespell
 
 binary local-binary: containers-storage
 
@@ -74,7 +79,7 @@ local-validate validate: install.tools ## validate DCO on the host
 	@./hack/git-validation.sh
 
 install.tools:
-	$(MAKE) -C tests/tools
+	$(MAKE) -C tests/tools GOLANGCI_LINT_VERSION=$(GOLANGCI_LINT_VERSION)
 
 install.docs: docs
 	$(MAKE) -C docs install
diff --git a/vendor/github.com/containers/storage/OWNERS b/vendor/github.com/containers/storage/OWNERS
index c169581de..a90f7e312 100644
--- a/vendor/github.com/containers/storage/OWNERS
+++ b/vendor/github.com/containers/storage/OWNERS
@@ -1,32 +1,14 @@
 approvers:
-  - Luap99
-  - TomSweeneyRedHat
-  - cevich
-  - edsantiago
-  - flouthoc
   - giuseppe
-  - haircommander
   - kolyshkin
-  - mrunalp
   - mtrmac
   - nalind
   - rhatdan
-  - saschagrunert
-  - umohnani8
   - vrothberg
 reviewers:
-  - Luap99
+  - Honny1
   - TomSweeneyRedHat
-  - cevich
-  - edsantiago
   - flouthoc
-  - giuseppe
-  - haircommander
   - kolyshkin
   - mrunalp
-  - mtrmac
-  - nalind
-  - rhatdan
-  - saschagrunert
-  - umohnani8
   - vrothberg
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 6570a6d0d..3ebf789f5 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.55.1
+1.56.0
diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go
index 7176ba361..396648e7f 100644
--- a/vendor/github.com/containers/storage/check.go
+++ b/vendor/github.com/containers/storage/check.go
@@ -8,6 +8,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -769,12 +770,9 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
 		return d
 	}
 	isUnaccounted := func(errs []error) bool {
-		for _, err := range errs {
-			if errors.Is(err, ErrLayerUnaccounted) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(errs, func(err error) bool {
+			return errors.Is(err, ErrLayerUnaccounted)
+		})
 	}
 	sort.Slice(layersToDelete, func(i, j int) bool {
 		// we've not heard of either of them, so remove them in the order the driver suggested
@@ -1005,12 +1003,12 @@ func (c *checkDirectory) remove(path string) {
 func (c *checkDirectory) header(hdr *tar.Header) {
 	name := path.Clean(hdr.Name)
 	dir, base := path.Split(name)
-	if strings.HasPrefix(base, archive.WhiteoutPrefix) {
+	if file, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
 		if base == archive.WhiteoutOpaqueDir {
 			c.remove(path.Clean(dir))
 			c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix())
 		} else {
-			c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):]))
+			c.remove(path.Join(dir, file))
 		}
 	} else {
 		if hdr.Typeflag == tar.TypeLink {
@@ -1044,7 +1042,7 @@ func (c *checkDirectory) header(hdr *tar.Header) {
 // headers updates a checkDirectory using information from the passed-in header slice
 func (c *checkDirectory) headers(hdrs []*tar.Header) {
-	hdrs = append([]*tar.Header{}, hdrs...)
+	hdrs = slices.Clone(hdrs)
 	// sort the headers from the diff to ensure that whiteouts appear
 	// before content when they both appear in the same directory, per
 	// https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index a7dfb405b..143fde297 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"slices"
 	"sync"
 	"time"
 
@@ -162,17 +163,17 @@ type containerStore struct {
 func copyContainer(c *Container) *Container {
 	return &Container{
 		ID:             c.ID,
-		Names:          copyStringSlice(c.Names),
+		Names:          copySlicePreferringNil(c.Names),
 		ImageID:        c.ImageID,
 		LayerID:        c.LayerID,
 		Metadata:       c.Metadata,
-		BigDataNames:   copyStringSlice(c.BigDataNames),
-		BigDataSizes:   copyStringInt64Map(c.BigDataSizes),
-		BigDataDigests: copyStringDigestMap(c.BigDataDigests),
+		BigDataNames:   copySlicePreferringNil(c.BigDataNames),
+		BigDataSizes:   copyMapPreferringNil(c.BigDataSizes),
+		BigDataDigests: copyMapPreferringNil(c.BigDataDigests),
 		Created:        c.Created,
-		UIDMap:         copyIDMap(c.UIDMap),
-		GIDMap:         copyIDMap(c.GIDMap),
-		Flags:          copyStringInterfaceMap(c.Flags),
+		UIDMap:         copySlicePreferringNil(c.UIDMap),
+		GIDMap:         copySlicePreferringNil(c.GIDMap),
+		Flags:          copyMapPreferringNil(c.Flags),
 		volatileStore:  c.volatileStore,
 	}
 }
@@ -690,13 +691,13 @@ func (r *containerStore) create(id string, names []string, image, layer string,
 		BigDataSizes:   make(map[string]int64),
 		BigDataDigests: make(map[string]digest.Digest),
 		Created:        time.Now().UTC(),
-		Flags:          copyStringInterfaceMap(options.Flags),
-		UIDMap:         copyIDMap(options.UIDMap),
-		GIDMap:         copyIDMap(options.GIDMap),
+		Flags:          newMapFrom(options.Flags),
+		UIDMap:         copySlicePreferringNil(options.UIDMap),
+		GIDMap:         copySlicePreferringNil(options.GIDMap),
 		volatileStore:  options.Volatile,
 	}
 	if options.MountOpts != nil {
-		container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
+		container.Flags[mountOptsFlag] = slices.Clone(options.MountOpts)
 	}
 	if options.Volatile {
 		container.Flags[volatileFlag] = true
@@ -788,13 +789,6 @@ func (r *containerStore) Delete(id string) error {
 		return ErrContainerUnknown
 	}
 	id = container.ID
-	toDeleteIndex := -1
-	for i, candidate := range r.containers {
-		if candidate.ID == id {
-			toDeleteIndex = i
-			break
-		}
-	}
 	delete(r.byid, id)
 	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
 	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@@ -803,14 +797,9 @@
 	for _, name := range container.Names {
 		delete(r.byname, name)
 	}
-	if toDeleteIndex != -1 {
-		// delete the container at toDeleteIndex
-		if toDeleteIndex == len(r.containers)-1 {
-			r.containers = r.containers[:len(r.containers)-1]
-		} else {
-			r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
-		}
-	}
+	r.containers = slices.DeleteFunc(r.containers, func(candidate *Container) bool {
+		return candidate.ID == id
+	})
 	if err := r.saveFor(container); err != nil {
 		return err
 	}
@@ -916,7 +905,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
 	if !ok {
 		return nil, ErrContainerUnknown
 	}
-	return copyStringSlice(c.BigDataNames), nil
+	return copySlicePreferringNil(c.BigDataNames), nil
 }
 
 // Requires startWriting.
@@ -948,14 +937,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 	if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 		save = true
 	}
-	addName := true
-	for _, name := range c.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(c.BigDataNames, key) {
 		c.BigDataNames = append(c.BigDataNames, key)
 		save = true
 	}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index c12d4ca54..a8312811b 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 /*
diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
index 27e621633..9587bf63c 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package aufs
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go
index 156f4a4f0..51b3d6dfa 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/mount.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package aufs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 7a0f9cbc7..c7f37d97e 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -1,5 +1,4 @@
 //go:build linux && cgo
-// +build linux,cgo
 
 package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
index c7d9d3b84..a4d77eaad 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -1,4 +1,3 @@
 //go:build !linux || !cgo
-// +build !linux !cgo
 
 package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
index 5816139f3..4f5d8a5b9 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -1,5 +1,4 @@
 //go:build linux && !btrfs_noversion && cgo
-// +build linux,!btrfs_noversion,cgo
 
 package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
index a61d8fbd9..58c1b0d0c 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -1,5 +1,4 @@
 //go:build linux && btrfs_noversion && cgo
-// +build linux,btrfs_noversion,cgo
 
 package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go
index d6150ceee..882fa04bd 100644
--- a/vendor/github.com/containers/storage/drivers/chown_darwin.go
+++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go
@@ -1,5 +1,4 @@
 //go:build darwin
-// +build darwin
 
 package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 42c12c627..808f4fea6 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows && !darwin
-// +build !windows,!darwin
 
 package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go
index 06ccf9fa4..6c2bd2ca2 100644
--- a/vendor/github.com/containers/storage/drivers/chown_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chown_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go
index a65f0d58d..2aa3e9e6b 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index 878940945..8c0bbed61 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -1,5 +1,4 @@
 //go:build cgo
-// +build cgo
 
 package copy
 
@@ -17,7 +16,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"net"
 	"os"
 	"path/filepath"
 	"strings"
@@ -200,11 +198,9 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 			}
 
 		case mode&os.ModeSocket != 0:
-			s, err := net.Listen("unix", dstPath)
-			if err != nil {
+			if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
 				return err
 			}
-			s.Close()
 
 		case mode&os.ModeDevice != 0:
 			if unshare.IsRootless() {
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
index 5a4629b74..baaa86ddc 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux || !cgo
-// +build !linux !cgo
 
 package copy //nolint: predeclared
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index b37837b00..dc9b12c3c 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -189,14 +189,14 @@ type Driver interface {
 type DriverWithDifferOutput struct {
 	Differ             Differ
 	Target             string
-	Size               int64
+	Size               int64 // Size of the uncompressed layer, -1 if unknown. Must be known if UncompressedDigest is set.
 	UIDs               []uint32
 	GIDs               []uint32
 	UncompressedDigest digest.Digest
 	CompressedDigest   digest.Digest
 	Metadata           string
 	BigData            map[string][]byte
-	TarSplit           []byte
+	TarSplit           []byte // nil if not available
 	TOCDigest          digest.Digest
 	// RootDirMode is the mode of the root directory of the layer, if specified.
 	RootDirMode *os.FileMode
@@ -254,8 +254,8 @@ type Differ interface {
 type DriverWithDiffer interface {
 	Driver
 	// ApplyDiffWithDiffer applies the changes using the callback function.
-	// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
-	ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
+	// The staging directory created by this function is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
 	// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
 	ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
 	// CleanupStagingDirectory cleanups the staging directory.  It can be used to cleanup the staging directory on errors
@@ -496,7 +496,7 @@ func driverPut(driver ProtoDriver, id string, mainErr *error) {
 		if *mainErr == nil {
 			*mainErr = err
 		} else {
-			logrus.Errorf(err.Error())
+			logrus.Error(err)
 		}
 	}
 }
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
index ee0fc7bfc..d730dc38a 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
index 6b6373a37..47749c6ef 100644
--- a/vendor/github.com/containers/storage/drivers/driver_solaris.go
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -1,5 +1,4 @@
 //go:build solaris && cgo
-// +build solaris,cgo
 
 package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
index 7dfbef007..dcf169b4d 100644
--- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux && !windows && !freebsd && !solaris && !darwin
-// +build !linux,!windows,!freebsd,!solaris,!darwin
 
 package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index fba9ec4fc..e500585ff 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -128,6 +128,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
 
 	options := MountOpts{
 		MountLabel: mountLabel,
+		Options:    []string{"ro"},
 	}
 	layerFs, err := driver.Get(id, options)
 	if err != nil {
@@ -138,10 +139,6 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
 
 	parentFs := ""
 	if parent != "" {
-		options := MountOpts{
-			MountLabel: mountLabel,
-			Options:    []string{"ro"},
-		}
 		parentFs, err = driver.Get(parent, options)
 		if err != nil {
 			return nil, err
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index d75c5fdf5..527701746 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
index bec455dd4..7867d5006 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check_116.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go
index 973cd786c..1fb3e62a4 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go
@@ -1,5 +1,4 @@
 //go:build linux && cgo
-// +build linux,cgo
 
 package overlay
 
@@ -8,12 +7,12 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
-	"io/fs"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"strings"
 	"sync"
+	"sync/atomic"
 
 	"github.com/containers/storage/pkg/chunked/dump"
 	"github.com/containers/storage/pkg/fsverity"
@@ -26,6 +25,10 @@ var (
 	composeFsHelperOnce sync.Once
 	composeFsHelperPath string
 	composeFsHelperErr  error
+
+	// skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel
+	// does not support it. Mounting directly via a file will be supported in kernel 6.12.
+	skipMountViaFile atomic.Bool
 )
 
 func getComposeFsHelper() (string, error) {
@@ -55,29 +58,26 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
 		return fmt.Errorf("failed to find mkcomposefs: %w", err)
 	}
 
-	fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
+	outFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o644)
 	if err != nil {
-		return &fs.PathError{Op: "openat", Path: destFile, Err: err}
+		return err
 	}
-	outFd := os.NewFile(uintptr(fd), "outFd")
 
-	fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0)
+	roFile, err := os.Open(fmt.Sprintf("/proc/self/fd/%d", outFile.Fd()))
 	if err != nil {
-		outFd.Close()
-		return fmt.Errorf("failed to dup output file: %w", err)
+		outFile.Close()
+		return fmt.Errorf("failed to reopen %s as read-only: %w", destFile, err)
 	}
-	newFd := os.NewFile(uintptr(fd), "newFd")
-	defer newFd.Close()
 
 	err = func() error {
-		// a scope to close outFd before setting fsverity on the read-only fd.
+		// a scope to close outFile before setting fsverity on the read-only fd.
+		defer outFile.Close()
 
 		errBuf := &bytes.Buffer{}
-		cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3")
-		cmd.ExtraFiles = []*os.File{outFd}
+		cmd := exec.Command(writerJson, "--from-file", "-", "-")
 		cmd.Stderr = errBuf
 		cmd.Stdin = dumpReader
+		cmd.Stdout = outFile
 		if err := cmd.Run(); err != nil {
 			rErr := fmt.Errorf("failed to convert json to erofs: %w", err)
 			exitErr := &exec.ExitError{}
@@ -92,7 +92,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
 		return err
 	}
 
-	if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
+	if err := fsverity.EnableVerity("manifest file", int(roFile.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
 		logrus.Warningf("%s", err)
 	}
@@ -114,45 +114,112 @@ struct lcfs_erofs_header_s {
 
 // hasACL returns true if the erofs blob has ACLs enabled
 func hasACL(path string) (bool, error) {
-	const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
-
-	fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+	const (
+		LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
+		versionNumberSize        = 4
+		magicNumberSize          = 4
+		flagsSize                = 4
+	)
+
+	file, err := os.Open(path)
 	if err != nil {
-		return false, &fs.PathError{Op: "openat", Path: path, Err: err}
+		return false, err
 	}
-	defer unix.Close(fd)
+	defer file.Close()
+
 	// do not worry about checking the magic number, if the file is invalid
 	// we will fail to mount it anyway
-	flags := make([]byte, 4)
-	nread, err := unix.Pread(fd, flags, 8)
+	buffer := make([]byte, versionNumberSize+magicNumberSize+flagsSize)
+	nread, err := file.Read(buffer)
 	if err != nil {
-		return false, fmt.Errorf("pread %q: %w", path, err)
+		return false, err
 	}
-	if nread != 4 {
+	if nread != len(buffer) {
 		return false, fmt.Errorf("failed to read flags from %q", path)
 	}
+	flags := buffer[versionNumberSize+magicNumberSize:]
 	return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil
 }
 
-func mountComposefsBlob(dataDir, mountPoint string) error {
-	blobFile := getComposefsBlob(dataDir)
-	loop, err := loopback.AttachLoopDeviceRO(blobFile)
+func openBlobFile(blobFile string, hasACL, useLoopDevice bool) (int, error) {
+	if useLoopDevice {
+		loop, err := loopback.AttachLoopDeviceRO(blobFile)
+		if err != nil {
+			return -1, err
+		}
+		defer loop.Close()
+
+		blobFile = loop.Name()
+	}
+
+	fsfd, err := unix.Fsopen("erofs", 0)
 	if err != nil {
-		return err
+		return -1, fmt.Errorf("failed to open erofs filesystem: %w", err)
+	}
+	defer unix.Close(fsfd)
+
+	if err := unix.FsconfigSetString(fsfd, "source", blobFile); err != nil {
+		return -1, fmt.Errorf("failed to set source for erofs filesystem: %w", err)
+	}
+
+	if err := unix.FsconfigSetFlag(fsfd, "ro"); err != nil {
+		return -1, fmt.Errorf("failed to set erofs filesystem read-only: %w", err)
+	}
+
+	if !hasACL {
+		if err := unix.FsconfigSetFlag(fsfd, "noacl"); err != nil {
+			return -1, fmt.Errorf("failed to set noacl for erofs filesystem: %w", err)
+		}
+	}
+
+	if err := unix.FsconfigCreate(fsfd); err != nil {
+		buffer := make([]byte, 4096)
+		if n, _ := unix.Read(fsfd, buffer); n > 0 {
+			return -1, fmt.Errorf("failed to create erofs filesystem: %s: %w", strings.TrimSuffix(string(buffer[:n]), "\n"), err)
+		}
+		return -1, fmt.Errorf("failed to create erofs filesystem: %w", err)
+	}
+
+	mfd, err := unix.Fsmount(fsfd, 0, unix.MOUNT_ATTR_RDONLY)
+	if err != nil {
+		buffer := make([]byte, 4096)
+		if n, _ := unix.Read(fsfd, buffer); n > 0 {
+			return -1, fmt.Errorf("failed to mount erofs filesystem: %s: %w", string(buffer[:n]), err)
+		}
+		return -1, fmt.Errorf("failed to mount erofs filesystem: %w", err)
 	}
-	defer loop.Close()
+	return mfd, nil
+}
+
+func openComposefsMount(dataDir string) (int, error) {
+	blobFile := getComposefsBlob(dataDir)
 
 	hasACL, err := hasACL(blobFile)
 	if err != nil {
-		return err
+		return -1, err
 	}
-	mountOpts := "ro"
-	if !hasACL {
-		mountOpts += ",noacl"
+
+	if !skipMountViaFile.Load() {
+		fd, err := openBlobFile(blobFile, hasACL, false)
+		if err == nil || !errors.Is(err, unix.ENOTBLK) {
+			return fd, err
+		}
+		logrus.Debugf("The current kernel doesn't support mounting EROFS directly from a file, fallback to a loopback device")
+		skipMountViaFile.Store(true)
+	}
+
+	return openBlobFile(blobFile, hasACL, true)
+}
+
+func mountComposefsBlob(dataDir, mountPoint string) error {
+	mfd, err := openComposefsMount(dataDir)
+	if err != nil {
+		return err
 	}
+	defer unix.Close(mfd)
 
-	if err := unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts); err != nil {
-		return fmt.Errorf("failed to mount erofs image at %q: %w", mountPoint, err)
+	if err := unix.MoveMount(mfd, "", unix.AT_FDCWD, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
+		return fmt.Errorf("failed to move mount to %q: %w", mountPoint, err)
 	}
 	return nil
 }
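The composefs rewrite above drops mount(2) over a loop device in favor of the new kernel mount API, which can attach an EROFS image directly from a regular file (the upstream comment cites kernel 6.12, with a loopback fallback on ENOTBLK). A stripped-down sketch of the fsopen/fsconfig/fsmount/move_mount sequence using golang.org/x/sys/unix; paths are placeholders and the error-detail reads are omitted:

	// Sketch: mount an erofs image read-only and attach it at mountPoint,
	// mirroring openBlobFile + mountComposefsBlob above.
	func mountErofs(blobFile, mountPoint string) error {
		fsfd, err := unix.Fsopen("erofs", 0)
		if err != nil {
			return err
		}
		defer unix.Close(fsfd)

		if err := unix.FsconfigSetString(fsfd, "source", blobFile); err != nil {
			return err
		}
		if err := unix.FsconfigCreate(fsfd); err != nil {
			return err // ENOTBLK here signals "fall back to a loop device"
		}

		mfd, err := unix.Fsmount(fsfd, 0, unix.MOUNT_ATTR_RDONLY)
		if err != nil {
			return err
		}
		defer unix.Close(mfd)

		return unix.MoveMount(mfd, "", unix.AT_FDCWD, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH)
	}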
var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string for _, arg := range strings.Split(options.Label, ",") { - kv := strings.SplitN(arg, "=", 2) - switch kv[0] { + key, val, _ := strings.Cut(arg, "=") + switch key { case "upperdir": upperk = "upperdir=" - upperv = kv[1] + upperv = val case "workdir": workk = "workdir=" - workv = kv[1] + workv = val case "lowerdir": lowerk = "lowerdir=" - lowerv = kv[1] + lowerv = val case "label": labelk = "label=" - labelv = kv[1] + labelv = val default: if others == "" { others = arg diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 8ed35745f..caf896463 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package overlay @@ -14,6 +13,7 @@ import ( "os/exec" "path" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -126,6 +126,7 @@ type Driver struct { naiveDiff graphdriver.DiffDriver supportsDType bool supportsVolatile *bool + supportsDataOnly *bool usingMetacopy bool usingComposefs bool @@ -158,30 +159,7 @@ func init() { } func hasMetacopyOption(opts []string) bool { - for _, s := range opts { - if s == "metacopy=on" { - return true - } - } - return false -} - -func stripOption(opts []string, option string) []string { - for i, s := range opts { - if s == option { - return stripOption(append(opts[:i], opts[i+1:]...), option) - } - } - return opts -} - -func hasVolatileOption(opts []string) bool { - for _, s := range opts { - if s == "volatile" { - return true - } - } - return false + return slices.Contains(opts, "metacopy=on") } func getMountProgramFlagFile(path string) string { @@ -294,6 +272,18 @@ func (d *Driver) getSupportsVolatile() (bool, error) { return supportsVolatile, nil } +func (d *Driver) getSupportsDataOnly() (bool, error) { + if d.supportsDataOnly != nil { + return *d.supportsDataOnly, nil + } + supportsDataOnly, err := supportsDataOnlyLayersCached(d.home, d.runhome) + if err != nil { + return false, err + } + d.supportsDataOnly = &supportsDataOnly + return supportsDataOnly, nil +} + // isNetworkFileSystem checks if the specified file system is supported by native overlay // as backing store when running in a user namespace. 
func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { @@ -382,13 +372,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if unshare.IsRootless() { return nil, fmt.Errorf("composefs is not supported in user namespaces") } - supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome) - if err != nil { - return nil, err - } - if !supportsDataOnly { - return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS) - } if _, err := getComposeFsHelper(); err != nil { return nil, fmt.Errorf("composefs helper program not found: %w", err) } @@ -606,7 +589,7 @@ func parseOptions(options []string) (*overlayOptions, error) { m := os.FileMode(mask) o.forceMask = &m default: - return nil, fmt.Errorf("overlay: Unknown option %s", key) + return nil, fmt.Errorf("overlay: unknown option %s", key) } } return o, nil @@ -891,11 +874,11 @@ func (d *Driver) pruneStagingDirectories() bool { anyPresent := false - homeStagingDir := filepath.Join(d.home, stagingDir) - dirs, err := os.ReadDir(homeStagingDir) + stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir) + dirs, err := os.ReadDir(stagingDirBase) if err == nil { for _, dir := range dirs { - stagingDirToRemove := filepath.Join(homeStagingDir, dir.Name()) + stagingDirToRemove := filepath.Join(stagingDirBase, dir.Name()) lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile)) if err != nil { anyPresent = true @@ -1227,17 +1210,22 @@ func (d *Driver) getAllImageStores() []string { return additionalImageStores } -func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { - var homedir string - - if useImageStore && d.imageStore != "" { - homedir = path.Join(d.imageStore, d.name) - } else { - homedir = d.home +// homeDirForImageStore returns the home directory to use when an image store is configured +func (d *Driver) homeDirForImageStore() string { + if d.imageStore != "" { + return path.Join(d.imageStore, d.name) } + // If there is not an image store configured, use the same + // store + return d.home +} +func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { + homedir := d.home + if useImageStore { + homedir = d.homeDirForImageStore() + } newpath := path.Join(homedir, id) - if err := fileutils.Exists(newpath); err != nil { for _, p := range d.getAllImageStores() { l := path.Join(p, d.name, id) @@ -1455,6 +1443,38 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if err := fileutils.Exists(dir); err != nil { return "", err } + if _, err := redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), true); err != nil { + return "", err + } + + // user namespace requires this to move a directory from lower to upper. 
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + + mergedDir := d.getMergedDir(id, dir, inAdditionalStore) + // Attempt to create the merged dir if it doesn't exist, but don't chown an already existing directory (it might be in an additional store) + if err := idtools.MkdirAllAndChownNew(mergedDir, 0o700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil && !os.IsExist(err) { + return "", err + } + + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { + // Ignore EINVAL, it means the directory is not a mount point and it can happen + // if the current function fails before the mount point is created. + if !errors.Is(mntErr, unix.EINVAL) { + logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr) + } + } + } + } + }() readWrite := !inAdditionalStore @@ -1492,19 +1512,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if err := unix.Uname(&uts); err == nil { release = " " + string(uts.Release[:]) + " " + string(uts.Version[:]) } - logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release) + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel %s", release) } else { logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it") } } - optsList = stripOption(optsList, "metacopy=on") + optsList = slices.DeleteFunc(optsList, func(opt string) bool { + return opt == "metacopy=on" + }) } - for _, o := range optsList { - if o == "ro" { - readWrite = false - break - } + if slices.Contains(optsList, "ro") { + readWrite = false } lowers, err := os.ReadFile(path.Join(dir, lowerFile)) @@ -1560,7 +1579,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO }() composeFsLayers := []string{} - composeFsLayersDir := filepath.Join(dir, "composefs-layers") maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) if err := fileutils.Exists(composefsBlob); err != nil { @@ -1575,7 +1593,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return "", fmt.Errorf("cannot mount a composefs layer as writeable") } - dest := filepath.Join(composeFsLayersDir, fmt.Sprintf("%d", i)) + dest := d.getStorePrivateDirectory(id, dir, fmt.Sprintf("composefs-layers/%d", i), inAdditionalStore) if err := os.MkdirAll(dest, 0o700); err != nil { return "", err } @@ -1683,12 +1701,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO optsList = append(optsList, "metacopy=on", "redirect_dir=on") } - // user namespace requires this to move a directory from lower to upper. - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) - if err != nil { - return "", err - } - if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) } @@ -1703,33 +1715,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } - mergedDir := d.getMergedDir(id, dir, inAdditionalStore) - // Attempt to create the merged dir only if it doesn't exist. 
- if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) { - if err := idtools.MkdirAllAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return "", err - } - } - if count := d.ctr.Increment(mergedDir); count > 1 { - return mergedDir, nil - } - defer func() { - if retErr != nil { - if c := d.ctr.Decrement(mergedDir); c <= 0 { - if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { - logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr) - } - } - } - }() - workdir := path.Join(dir, "work") if d.options.mountProgram == "" && unshare.IsRootless() { optsList = append(optsList, "userxattr") } - if options.Volatile && !hasVolatileOption(optsList) { + if options.Volatile && !slices.Contains(optsList, "volatile") { supported, err := d.getSupportsVolatile() if err != nil { return "", err @@ -1790,8 +1782,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO lowerDirs := strings.Join(absLowers, ":") if len(composeFsLayers) > 0 { - composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::") - lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs + sep := "::" + supportsDataOnly, err := d.getSupportsDataOnly() + if err != nil { + return "", err + } + if !supportsDataOnly { + sep = ":" + } + composeFsLayersLowerDirs := strings.Join(composeFsLayers, sep) + lowerDirs = lowerDirs + sep + composeFsLayersLowerDirs } // absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent // its usage. @@ -1877,16 +1877,36 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return mergedDir, nil } -// getMergedDir returns the directory path that should be used as the mount point for the overlayfs. -func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string { - // If the layer is in an additional store, the lock we might hold only a reading lock. To prevent - // races with other processes, use a private directory under the main store rundir. At this point, the - // current process is holding an exclusive lock on the store, and since the rundir cannot be shared for - // different stores, it is safe to assume the current process has exclusive access to it. +// getStorePrivateDirectory returns a directory path for storing data that requires exclusive access. +// If 'inAdditionalStore' is true, the path will be under the rundir, otherwise it will be placed in +// the primary store. +func (d *Driver) getStorePrivateDirectory(id, layerDir, subdir string, inAdditionalStore bool) string { if inAdditionalStore { - return path.Join(d.runhome, id, "merged") + return path.Join(d.runhome, id, subdir) } - return path.Join(dir, "merged") + return path.Join(layerDir, subdir) +} + +// getMergedDir returns the directory path that should be used as the mount point for the overlayfs. +func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string { + // Ordinarily, .Get() (layer mounting) callers are supposed to guarantee exclusion. + // + // But additional stores are initialized with RO locks and don’t support a write + // lock operation at all; and naiveDiff operations cause mounts/unmounts, so they might + // happen on code paths where we might only holding a RO lock for the additional store. + // To prevent races with other processes mounting or unmounting the layer, + // use a private directory under the main store rundir, not the "merged" directory inside the + // original layer store holding the layer data. 
+ // + // To support this, contrary to the _general_ locking rules for .Diff / .Changes (which allow a RO lock), + // the top-level Store implementation uses an exclusive lock for the primary layer store; + // and since the rundir cannot be shared for different stores, it is safe to assume the + // current process has exclusive access to it. + // + // TO DO: LOCKING BUG: the .DiffSize operation does not currently hold an exclusive lock on the primary store. + // (_Some_ of the callers might be better ported to use a metadata-only size computation instead of DiffSize, + // but DiffSize probably needs to remain for computing sizes of container’s RW layers.) + return d.getStorePrivateDirectory(id, dir, "merged", inAdditionalStore) } // Put unmounts the mount path created for the given id. @@ -1934,7 +1954,7 @@ func (d *Driver) Put(id string) error { // If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all // pending changes are propagated to the file system if !unmounted { - fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0) + fd, err := unix.Open(mountpoint, unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err == nil { if err := unix.Syncfs(fd); err != nil { logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) @@ -2094,9 +2114,14 @@ func (g *overlayFileGetter) Close() error { return errs.ErrorOrNil() } -func (d *Driver) getStagingDir(id string) string { - _, homedir, _ := d.dir2(id, d.imageStore != "") - return filepath.Join(homedir, stagingDir) +// newStagingDir creates a new staging directory and returns the path to it. +func (d *Driver) newStagingDir() (string, error) { + stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir) + err := os.MkdirAll(stagingDirBase, 0o700) + if err != nil && !os.IsExist(err) { + return "", err + } + return os.MkdirTemp(stagingDirBase, "") } // DiffGetter returns a FileGetCloser that can read files from the directory that @@ -2128,24 +2153,16 @@ func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error) for _, diffDir := range diffDirs { // diffDir has the form $GRAPH_ROOT/overlay/$ID/diff, so grab the $ID from the parent directory id := path.Base(path.Dir(diffDir)) - composefsBlob := d.getComposefsData(id) - if fileutils.Exists(composefsBlob) != nil { + composefsData := d.getComposefsData(id) + if fileutils.Exists(composefsData) != nil { // not a composefs layer, ignore it continue } - dir, err := os.MkdirTemp(d.runhome, "composefs-mnt") + fd, err := openComposefsMount(composefsData) if err != nil { return nil, err } - if err := mountComposefsBlob(composefsBlob, dir); err != nil { - return nil, err - } - fd, err := os.Open(dir) - if err != nil { - return nil, err - } - composefsMounts[diffDir] = fd - _ = unix.Unmount(dir, unix.MNT_DETACH) + composefsMounts[diffDir] = os.NewFile(uintptr(fd), composefsData) } return &overlayFileGetter{diffDirs: diffDirs, composefsMounts: composefsMounts}, nil } @@ -2164,14 +2181,14 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { feature := "dataonly-layers" - overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + overlayCacheResult, _, err := cachedFeatureCheck(runhome, feature) if err == nil { if overlayCacheResult { logrus.Debugf("Cached value indicated that data-only layers for overlay are supported") return true, nil } logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported") - return false,
errors.New(overlayCacheText) + return false, nil } supportsDataOnly, err := supportsDataOnlyLayers(home) if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil { @@ -2181,7 +2198,7 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { } // ApplyDiffWithDiffer applies the changes in the new layer using the specified function -func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) { +func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) { var idMappings *idtools.IDMappings var forceMask *os.FileMode @@ -2197,46 +2214,31 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App idMappings = &idtools.IDMappings{} } - var applyDir string - - if id == "" { - stagingDir := d.getStagingDir(id) - err := os.MkdirAll(stagingDir, 0o700) - if err != nil && !os.IsExist(err) { - return graphdriver.DriverWithDifferOutput{}, err - } - layerDir, err := os.MkdirTemp(stagingDir, "") - if err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } - perms := defaultPerms - if forceMask != nil { - perms = *forceMask - } - applyDir = filepath.Join(layerDir, "dir") - if err := os.Mkdir(applyDir, perms); err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } + layerDir, err := d.newStagingDir() + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + perms := defaultPerms + if forceMask != nil { + perms = *forceMask + } + applyDir := filepath.Join(layerDir, "dir") + if err := os.Mkdir(applyDir, perms); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } - lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile)) - if err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } - defer func() { - if errRet != nil { - delete(d.stagingDirsLocks, layerDir) - lock.Unlock() - } - }() - d.stagingDirsLocks[layerDir] = lock - lock.Lock() - } else { - var err error - applyDir, err = d.getDiffPath(id) - if err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } + lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile)) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err } + defer func() { + if errRet != nil { + delete(d.stagingDirsLocks, layerDir) + lock.Unlock() + } + }() + d.stagingDirsLocks[layerDir] = lock + lock.Lock() logrus.Debugf("Applying differ in %s", applyDir) @@ -2273,10 +2275,6 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr } }() - if filepath.Dir(parentStagingDir) != d.getStagingDir(id) { - return fmt.Errorf("%q is not a staging directory", stagingDirectory) - } - diffPath, err := d.getDiffPath(id) if err != nil { return err @@ -2361,7 +2359,7 @@ func (d *Driver) getComposefsData(id string) string { func (d *Driver) getDiffPath(id string) (string, error) { dir := d.dir(id) - return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) + return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), false) } func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { @@ -2370,7 +2368,7 @@ func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { return nil, err } for i, l := range layers { - layers[i], err = redirectDiffIfAdditionalLayer(l) + layers[i], err = redirectDiffIfAdditionalLayer(l, false) if err != nil { return 
nil, err } @@ -2713,12 +2711,17 @@ func notifyReleaseAdditionalLayer(al string) { // redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and // returns the redirected path. If the passed diff is not the one in Additional Layer // Store, it returns the original path without changes. -func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { +func redirectDiffIfAdditionalLayer(diffPath string, checkExistence bool) (string, error) { if ld, err := os.Readlink(diffPath); err == nil { // diff is the link to Additional Layer Store if !path.IsAbs(ld) { return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld) } + if checkExistence { + if err := fileutils.Exists(ld); err != nil { + return "", fmt.Errorf("failed to access to the linked additional layer: %w", err) + } + } diffPath = ld } else if err.(*os.PathError).Err != syscall.EINVAL { return "", err diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go index bc80301d4..39ca489f5 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go @@ -1,5 +1,4 @@ //go:build linux && cgo && !exclude_disk_quota -// +build linux,cgo,!exclude_disk_quota package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go index 1340c45b3..221006b28 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go @@ -1,6 +1,4 @@ //go:build linux && (!cgo || exclude_disk_quota) -// +build linux -// +build !cgo exclude_disk_quota package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go index 0577711b3..a6df21d0f 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -1,5 +1,4 @@ //go:build linux && !cgo -// +build linux,!cgo package overlay @@ -7,6 +6,10 @@ import ( "fmt" ) +func openComposefsMount(dataDir string) (int, error) { + return 0, fmt.Errorf("composefs not supported on this build") +} + func getComposeFsHelper() (string, error) { return "", fmt.Errorf("composefs not supported on this build") } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go index 33b163a8c..b35633143 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go index 651990089..85045f66e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go 
b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go index 2670ef3df..b5baa11f4 100644 --- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go +++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package overlayutils diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go index 92e4001d7..59ba5b0b2 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go @@ -1,5 +1,4 @@ //go:build linux && !exclude_disk_quota && cgo -// +build linux,!exclude_disk_quota,cgo // // projectquota.go - implements XFS project quota controls @@ -19,6 +18,16 @@ package quota #include #include +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif #ifndef FS_IOC_FSGETXATTR #define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) #endif @@ -163,6 +172,11 @@ func NewControl(basePath string) (*Control, error) { return nil, err } + // Clear inherit flag from top-level directory if necessary. + if err := stripProjectInherit(basePath); err != nil { + return nil, err + } + // // get first project id to be used for next container // @@ -340,6 +354,8 @@ func setProjectID(targetPath string, projectID uint32) error { } defer closeDir(dir) + logrus.Debugf("Setting quota project ID %d on %s", projectID, targetPath) + var fsx C.struct_fsxattr _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&fsx))) @@ -347,6 +363,7 @@ func setProjectID(targetPath string, projectID uint32) error { return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) } fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { @@ -356,6 +373,36 @@ func setProjectID(targetPath string, projectID uint32) error { return nil } +// stripProjectInherit strips the project inherit flag from a directory. +// Used on the top-level directory to ensure project IDs are only inherited for +// files in directories we set quotas on - not the directories we want to set +// the quotas on, as that would make everything use the same project ID. +func stripProjectInherit(targetPath string) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to get xfs attrs for %s: %w", targetPath, errno) + } + if fsx.fsx_xflags&C.FS_XFLAG_PROJINHERIT != 0 { + // Flag is set, need to clear it. 
+ logrus.Debugf("Clearing PROJINHERIT flag from directory %s", targetPath) + fsx.fsx_xflags = fsx.fsx_xflags &^ C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to clear PROJINHERIT for %s: %w", targetPath, errno) + } + } + return nil +} + // findNextProjectID - find the next project id to be used for containers // by scanning driver home directory to find used project ids func (q *Control) findNextProjectID() error { diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go index 648fd3379..fdc2ad161 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux || exclude_disk_quota || !cgo -// +build !linux exclude_disk_quota !cgo package quota diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go index bbb9cb657..d95bd398b 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_aufs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -1,5 +1,4 @@ //go:build !exclude_graphdriver_aufs && linux -// +build !exclude_graphdriver_aufs,linux package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go index 425ebd798..01a322374 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -1,5 +1,4 @@ //go:build !exclude_graphdriver_btrfs && linux -// +build !exclude_graphdriver_btrfs,linux package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go index 95b77b73e..6e82a1ea3 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -1,5 +1,4 @@ //go:build !exclude_graphdriver_overlay && linux && cgo -// +build !exclude_graphdriver_overlay,linux,cgo package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go index 8e5788a43..136848f4a 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -1,5 +1,4 @@ //go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris package register diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go index d94756bdd..17e9d5870 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package vfs // import "github.com/containers/storage/drivers/vfs" diff --git 
a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 18f90fdc5..d38e74534 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -764,8 +764,8 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, buf := bufio.NewWriter(nil) for err == nil { base := path.Base(hdr.Name) - if strings.HasPrefix(base, archive.WhiteoutPrefix) { - name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + if rm, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok { + name := path.Join(path.Dir(hdr.Name), rm) err = w.Remove(filepath.FromSlash(name)) if err != nil { return 0, err diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index 0e859a93b..ef26c6c7f 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package zfs @@ -393,12 +392,18 @@ func (d *Driver) Remove(id string) error { name := d.zfsPath(id) dataset := zfs.Dataset{Name: name} err := dataset.Destroy(zfs.DestroyRecursive) - if err == nil { - d.Lock() - delete(d.filesystemsCache, name) - d.Unlock() + if err != nil { + // We must be tolerant in case the image has already been removed, + // for example, accidentally by hand. + if _, err1 := zfs.GetDataset(name); err1 == nil { + return err + } + logrus.WithField("storage-driver", "zfs").Debugf("Layer %s has already been removed; ignore it and continue to delete the cache", id) } - return err + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + return nil } // Get returns the mountpoint for the given id after creating the target directories if necessary. 
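Note: the reworked zfs Remove above makes layer deletion idempotent. A failed Destroy is only surfaced if the dataset demonstrably still exists; otherwise the dataset is assumed to be already gone (for example, removed by hand), the stale cache entry is dropped, and the call succeeds. A minimal sketch of that pattern follows; the destroy and exists parameters are purely illustrative stand-ins for dataset.Destroy and a zfs.GetDataset probe, not the real go-zfs API.

package zfsutil

import "sync"

// removeIdempotent mirrors the control flow of the patched Remove: a
// destroy error is reported only when the target still exists; in every
// other case the local cache entry is pruned and removal is treated as
// successful.
func removeIdempotent(name string, destroy func(string) error, exists func(string) bool, mu *sync.Mutex, cache map[string]bool) error {
	if err := destroy(name); err != nil {
		if exists(name) {
			return err // dataset is still there: a genuine failure
		}
		// Already gone: fall through and only clean up the cache,
		// exactly as if destroy had succeeded.
	}
	mu.Lock()
	delete(cache, name)
	mu.Unlock()
	return nil
}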
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go index 738b0ae1b..15a1447ac 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -1,4 +1,3 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package zfs diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index d71eab08b..5c9127ede 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "strings" "sync" "time" @@ -181,18 +182,18 @@ func copyImage(i *Image) *Image { return &Image{ ID: i.ID, Digest: i.Digest, - Digests: copyDigestSlice(i.Digests), - Names: copyStringSlice(i.Names), - NamesHistory: copyStringSlice(i.NamesHistory), + Digests: copySlicePreferringNil(i.Digests), + Names: copySlicePreferringNil(i.Names), + NamesHistory: copySlicePreferringNil(i.NamesHistory), TopLayer: i.TopLayer, - MappedTopLayers: copyStringSlice(i.MappedTopLayers), + MappedTopLayers: copySlicePreferringNil(i.MappedTopLayers), Metadata: i.Metadata, - BigDataNames: copyStringSlice(i.BigDataNames), - BigDataSizes: copyStringInt64Map(i.BigDataSizes), - BigDataDigests: copyStringDigestMap(i.BigDataDigests), + BigDataNames: copySlicePreferringNil(i.BigDataNames), + BigDataSizes: copyMapPreferringNil(i.BigDataSizes), + BigDataDigests: copyMapPreferringNil(i.BigDataDigests), Created: i.Created, ReadOnly: i.ReadOnly, - Flags: copyStringInterfaceMap(i.Flags), + Flags: copyMapPreferringNil(i.Flags), } } @@ -716,14 +717,14 @@ func (r *imageStore) create(id string, names []string, layer string, options Ima Digest: options.Digest, Digests: dedupeDigests(options.Digests), Names: names, - NamesHistory: copyStringSlice(options.NamesHistory), + NamesHistory: copySlicePreferringNil(options.NamesHistory), TopLayer: layer, Metadata: options.Metadata, BigDataNames: []string{}, BigDataSizes: make(map[string]int64), BigDataDigests: make(map[string]digest.Digest), Created: options.CreationDate, - Flags: copyStringInterfaceMap(options.Flags), + Flags: newMapFrom(options.Flags), } if image.Created.IsZero() { image.Created = time.Now().UTC() @@ -863,12 +864,6 @@ func (r *imageStore) Delete(id string) error { return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } id = image.ID - toDeleteIndex := -1 - for i, candidate := range r.images { - if candidate.ID == id { - toDeleteIndex = i - } - } delete(r.byid, id) // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. 
@@ -877,21 +872,18 @@ func (r *imageStore) Delete(id string) error { delete(r.byname, name) } for _, digest := range image.Digests { - prunedList := imageSliceWithoutValue(r.bydigest[digest], image) + prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool { + return i == image + }) if len(prunedList) == 0 { delete(r.bydigest, digest) } else { r.bydigest[digest] = prunedList } } - if toDeleteIndex != -1 { - // delete the image at toDeleteIndex - if toDeleteIndex == len(r.images)-1 { - r.images = r.images[:len(r.images)-1] - } else { - r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) - } - } + r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool { + return candidate.ID == id + }) if err := r.Save(); err != nil { return err } @@ -974,18 +966,7 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) { if !ok { return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } - return copyStringSlice(image.BigDataNames), nil -} - -func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { - modified := make([]*Image, 0, len(slice)) - for _, v := range slice { - if v == value { - continue - } - modified = append(modified, v) - } - return modified + return copySlicePreferringNil(image.BigDataNames), nil } // Requires startWriting. @@ -1037,21 +1018,16 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true } - addName := true - for _, name := range image.BigDataNames { - if name == key { - addName = false - break - } - } - if addName { + if !slices.Contains(image.BigDataNames, key) { image.BigDataNames = append(image.BigDataNames, key) save = true } for _, oldDigest := range image.Digests { // remove the image from the list of images in the digest-based index if list, ok := r.bydigest[oldDigest]; ok { - prunedList := imageSliceWithoutValue(list, image) + prunedList := slices.DeleteFunc(list, func(i *Image) bool { + return i == image + }) if len(prunedList) == 0 { delete(r.bydigest, oldDigest) } else { @@ -1066,9 +1042,7 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest // add the image to the list of images in the digest-based index which // corresponds to the new digest for this item, unless it's already there list := r.bydigest[newDigest] - if len(list) == len(imageSliceWithoutValue(list, image)) { - // the list isn't shortened by trying to prune this image from it, - // so it's not in there yet + if !slices.Contains(list, image) { r.bydigest[newDigest] = append(list, image) } } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 6caf28ab7..6fe1a0803 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -5,10 +5,12 @@ import ( "errors" "fmt" "io" + "maps" "os" "path" "path/filepath" "reflect" + "slices" "sort" "strings" "sync" @@ -134,9 +136,12 @@ type Layer struct { TOCDigest digest.Digest `json:"toc-digest,omitempty"` // UncompressedSize is the length of the blob that was last passed to - // ApplyDiff() or create(), after we decompressed it. If - // UncompressedDigest is not set, this should be treated as if it were - // an uninitialized value. + // ApplyDiff() or create(), after we decompressed it. + // + // - If UncompressedDigest is set, this must be set to a valid value. 
+ // - Otherwise, if TOCDigest is set, this is either valid or -1. + // - If neither of these digests is set, this should be treated as if it were + // an uninitialized value. UncompressedSize int64 `json:"diff-size,omitempty"` // CompressionType is the type of compression which we detected on the blob @@ -312,9 +317,8 @@ type rwLayerStore interface { // applies its changes to a specified layer. ApplyDiff(to string, diff io.Reader) (int64, error) - // ApplyDiffWithDiffer applies the changes through the differ callback function. - // If to is the empty string, then a staging directory is created by the driver. - ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + // applyDiffWithDifferNoLock applies the changes through the differ callback function. + applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) // CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors CleanupStagingDirectory(stagingDirectory string) error @@ -435,7 +439,7 @@ func layerLocation(l *Layer) layerLocations { func copyLayer(l *Layer) *Layer { return &Layer{ ID: l.ID, - Names: copyStringSlice(l.Names), + Names: copySlicePreferringNil(l.Names), Parent: l.Parent, Metadata: l.Metadata, MountLabel: l.MountLabel, @@ -450,12 +454,12 @@ func copyLayer(l *Layer) *Layer { CompressionType: l.CompressionType, ReadOnly: l.ReadOnly, volatileStore: l.volatileStore, - BigDataNames: copyStringSlice(l.BigDataNames), - Flags: copyStringInterfaceMap(l.Flags), - UIDMap: copyIDMap(l.UIDMap), - GIDMap: copyIDMap(l.GIDMap), - UIDs: copyUint32Slice(l.UIDs), - GIDs: copyUint32Slice(l.GIDs), + BigDataNames: copySlicePreferringNil(l.BigDataNames), + Flags: copyMapPreferringNil(l.Flags), + UIDMap: copySlicePreferringNil(l.UIDMap), + GIDMap: copySlicePreferringNil(l.GIDMap), + UIDs: copySlicePreferringNil(l.UIDs), + GIDs: copySlicePreferringNil(l.GIDs), } } @@ -1213,8 +1217,8 @@ func (r *layerStore) Size(name string) (int64, error) { // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that // a zero value is not just present because it was never set to anything else (which can happen if the layer was // created by a version of this library that didn't keep track of digest and size information). - if layer.TOCDigest != "" || layer.UncompressedDigest != "" { - return layer.UncompressedSize, nil + if layer.UncompressedDigest != "" || layer.TOCDigest != "" { + return layer.UncompressedSize, nil // This may return -1 if only TOCDigest is set } return -1, nil } @@ -1372,7 +1376,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize templateCompressionType = templateLayer.CompressionType - templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
+ templateUIDs, templateGIDs = slices.Clone(templateLayer.UIDs), slices.Clone(templateLayer.GIDs) templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID)) if err != nil && !errors.Is(err, os.ErrNotExist) { return nil, -1, err @@ -1402,9 +1406,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount CompressionType: templateCompressionType, UIDs: templateUIDs, GIDs: templateGIDs, - Flags: copyStringInterfaceMap(moreOptions.Flags), - UIDMap: copyIDMap(moreOptions.UIDMap), - GIDMap: copyIDMap(moreOptions.GIDMap), + Flags: newMapFrom(moreOptions.Flags), + UIDMap: copySlicePreferringNil(moreOptions.UIDMap), + GIDMap: copySlicePreferringNil(moreOptions.GIDMap), BigDataNames: []string{}, volatileStore: moreOptions.Volatile, } @@ -1564,19 +1568,9 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) // - r.layers[].MountPoint (directly and via loadMounts / saveMounts) // - r.bymount (via loadMounts / saveMounts) - // check whether options include ro option - hasReadOnlyOpt := func(opts []string) bool { - for _, item := range opts { - if item == "ro" { - return true - } - } - return false - } - // You are not allowed to mount layers from readonly stores if they // are not mounted read/only. - if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) { + if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") { return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() @@ -1836,14 +1830,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error return fmt.Errorf("closing bigdata file for the layer: %w", err) } - addName := true - for _, name := range layer.BigDataNames { - if name == key { - addName = false - break - } - } - if addName { + if !slices.Contains(layer.BigDataNames, key) { layer.BigDataNames = append(layer.BigDataNames, key) return r.saveFor(layer) } @@ -1856,7 +1843,7 @@ func (r *layerStore) BigDataNames(id string) ([]string, error) { if !ok { return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown) } - return copyStringSlice(layer.BigDataNames), nil + return copySlicePreferringNil(layer.BigDataNames), nil } // Requires startReading or startWriting. @@ -1938,32 +1925,13 @@ func (r *layerStore) deleteInternal(id string) error { delete(r.bymount, layer.MountPoint) } r.deleteInDigestMap(id) - toDeleteIndex := -1 - for i, candidate := range r.layers { - if candidate.ID == id { - toDeleteIndex = i - break - } - } - if toDeleteIndex != -1 { - // delete the layer at toDeleteIndex - if toDeleteIndex == len(r.layers)-1 { - r.layers = r.layers[:len(r.layers)-1] - } else { - r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) - } - } - if mountLabel != "" { - var found bool - for _, candidate := range r.layers { - if candidate.MountLabel == mountLabel { - found = true - break - } - } - if !found { - selinux.ReleaseLabel(mountLabel) - } + r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool { + return candidate.ID == id + }) + if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool { + return candidate.MountLabel == mountLabel + }) { + selinux.ReleaseLabel(mountLabel) } return nil } @@ -1971,21 +1939,15 @@ func (r *layerStore) deleteInternal(id string) error { // Requires startWriting. 
func (r *layerStore) deleteInDigestMap(id string) { for digest, layers := range r.bycompressedsum { - for i, layerID := range layers { - if layerID == id { - layers = append(layers[:i], layers[i+1:]...) - r.bycompressedsum[digest] = layers - break - } + if i := slices.Index(layers, id); i != -1 { + layers = slices.Delete(layers, i, i+1) + r.bycompressedsum[digest] = layers } } for digest, layers := range r.byuncompressedsum { - for i, layerID := range layers { - if layerID == id { - layers = append(layers[:i], layers[i+1:]...) - r.byuncompressedsum[digest] = layers - break - } + if i := slices.Index(layers, id); i != -1 { + layers = slices.Delete(layers, i, i+1) + r.byuncompressedsum[digest] = layers } } } @@ -2095,6 +2057,9 @@ func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings { } // Requires startReading or startWriting. +// +// NOTE: Overlay’s implementation assumes use of an exclusive lock over the primary layer store, +// see drivers/overlay.Driver.getMergedDir. func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) if err != nil { @@ -2161,6 +2126,9 @@ func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteClo } // Requires startReading or startWriting. +// +// NOTE: Overlay’s implementation assumes use of an exclusive lock over the primary layer store, +// see drivers/overlay.Driver.getMergedDir. func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { var metadata storage.Unpacker @@ -2539,15 +2507,13 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver if layer.Flags == nil { layer.Flags = make(map[string]interface{}) } - for k, v := range options.Flags { - layer.Flags[k] = v - } + maps.Copy(layer.Flags, options.Flags) } if err = r.saveFor(layer); err != nil { return err } - if len(diffOutput.TarSplit) != 0 { + if diffOutput.TarSplit != nil { tsdata := bytes.Buffer{} compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) if err != nil { @@ -2579,37 +2545,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver return err } -// Requires startWriting. -func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { +// It must be called without any c/storage locks held to allow differ to make c/storage calls. 
+func (r *layerStore) applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return nil, ErrNotSupported } - if to == "" { - output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ) - return &output, err - } - - layer, ok := r.lookup(to) - if !ok { - return nil, ErrLayerUnknown - } - if options == nil { - options = &drivers.ApplyDiffWithDifferOpts{ - ApplyDiffOpts: drivers.ApplyDiffOpts{ - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, - }, - } - } - output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ) - if err != nil { - return nil, err - } - layer.UIDs = output.UIDs - layer.GIDs = output.GIDs - err = r.saveFor(layer) + output, err := ddriver.ApplyDiffWithDiffer(options, differ) return &output, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go index eab9da51a..db614cdd6 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_110.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -1,5 +1,4 @@ //go:build go1.10 -// +build go1.10 package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go index f591bf389..304464fe7 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_19.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -1,5 +1,4 @@ //go:build !go1.10 -// +build !go1.10 package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go index a754d0d4a..74e62331a 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go @@ -1,5 +1,4 @@ //go:build netbsd || freebsd || darwin -// +build netbsd freebsd darwin package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index eae60a305..b9d718b60 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -124,8 +124,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str } // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] + if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok { originalPath := filepath.Join(dir, originalBase) if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go index 2468ab3ca..b342ff75e 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index c6811031f..56f2086bc 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ 
b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index 85a5b3a5d..6db31cf4c 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 448784549..3075c27bb 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -5,6 +5,7 @@ import ( "bytes" "fmt" "io" + "maps" "os" "path/filepath" "reflect" @@ -97,8 +98,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { f := filepath.Base(path) // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] + if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok { return filepath.Join(filepath.Dir(path), originalFile), nil } @@ -319,9 +319,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // otherwise any previous delete/change is considered recursive oldChildren := make(map[string]*FileInfo) if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } + maps.Copy(oldChildren, oldInfo.children) } for name, newChild := range info.children { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index ca272e68f..2965ccc9f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive @@ -31,7 +30,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool }() // block until both routines have returned - for i := 0; i < 2; i++ { + for range 2 { if err := <-errs; err != nil { return nil, nil, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go index 25429406f..fb2cb70c2 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go index d6c5fd98b..f57928244 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go index 92b8d05ed..4da1ced3e 100644 --- a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package archive @@ -80,9 +79,9 @@ func parseFileFlags(fflags string) 
(uint32, uint32, error) { var set, clear uint32 = 0, 0 for _, fflag := range strings.Split(fflags, ",") { isClear := false - if strings.HasPrefix(fflag, "no") { + if clean, ok := strings.CutPrefix(fflag, "no"); ok { isClear = true - fflag = strings.TrimPrefix(fflag, "no") + fflag = clean } if value, ok := flagNameToValue[fflag]; ok { if isClear { diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go index 27a0bae2b..5ad480f7c 100644 --- a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go index 8db64e804..3555d753a 100644 --- a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 5ff9f6b51..710167d24 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -83,6 +83,12 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions } } + destVal, err := newUnpackDestination(root, dest) + if err != nil { + return err + } + defer destVal.Close() + r := tarArchive if decompress { decompressedArchive, err := archive.DecompressStream(tarArchive) @@ -93,7 +99,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions r = decompressedArchive } - return invokeUnpack(r, dest, options, root) + return invokeUnpack(r, destVal, options) } // Tar tars the requested path while chrooted to the specified root. 
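Note: untarHandler above now resolves the unpack destination once, up front, and the unix implementation that follows passes the opened root directory to the re-exec'd storage-untar child as an inherited file descriptor rather than as a path. A minimal sketch of that convention, with a hypothetical helper binary: Go's os/exec maps ExtraFiles[i] to fd 3+i in the child, which is why the diff pins the options pipe to fd 3 (tarOptionsDescriptor) and the root to fd 4 (rootFileDescriptor).

package chrootutil

import (
	"os"
	"os/exec"
)

// runWithRootFd opens root as a directory and hands it to a helper
// process as fd 3 (ExtraFiles[0]). The child can fchdir() to that fd
// and then treat "." as its root; no /proc/self/fd lookup is needed,
// which matters on FreeBSD where that path cannot be read.
func runWithRootFd(helper, root string, args ...string) error {
	dir, err := os.Open(root) // opened O_CLOEXEC; ExtraFiles makes it inheritable again
	if err != nil {
		return err
	}
	defer dir.Close()

	cmd := exec.Command(helper, args...)
	cmd.ExtraFiles = []*os.File{dir} // the child sees this as fd 3
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}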
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go index a0a578d3e..caf348493 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go @@ -6,12 +6,26 @@ import ( "github.com/containers/storage/pkg/archive" ) +type unpackDestination struct { + dest string +} + +func (dst *unpackDestination) Close() error { + return nil +} + +// newUnpackDestination is a no-op on this platform +func newUnpackDestination(root, dest string) (*unpackDestination, error) { + return &unpackDestination{ + dest: dest, + }, nil +} + func invokeUnpack(decompressedArchive io.Reader, - dest string, - options *archive.TarOptions, root string, + dest *unpackDestination, + options *archive.TarOptions, ) error { - _ = root // Restricting the operation to this root is not implemented on macOS - return archive.Unpack(decompressedArchive, dest, options) + return archive.Unpack(decompressedArchive, dest.dest, options) } func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index 9907d24c3..88df9e56f 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin -// +build !windows,!darwin package chrootarchive @@ -9,15 +8,41 @@ import ( "flag" "fmt" "io" + "io/fs" "os" "path/filepath" "runtime" "strings" + "golang.org/x/sys/unix" + "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" ) +type unpackDestination struct { + root *os.File + dest string +} + +func (dst *unpackDestination) Close() error { + return dst.root.Close() +} + +// tarOptionsDescriptor is passed as an extra file +const tarOptionsDescriptor = 3 + +// rootFileDescriptor is passed as an extra file +const rootFileDescriptor = 4 + +// procPathForFd gives us a string for a descriptor. +// Note that while Linux supports actually *reading* this +// path, FreeBSD and other platforms don't; but in this codebase +// we only compare strings. +func procPathForFd(fd int) string { + return fmt.Sprintf("/proc/self/fd/%d", fd) +} + // untar is the entry-point for storage-untar on re-exec. This is not used on // Windows as it does not support chroot, hence no point sandboxing through // chroot and rexec. @@ -28,7 +53,7 @@ func untar() { var options archive.TarOptions // read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + if err := json.NewDecoder(os.NewFile(tarOptionsDescriptor, "options")).Decode(&options); err != nil { fatal(err) } @@ -38,7 +63,17 @@ func untar() { root = flag.Arg(1) } - if root == "" { + // FreeBSD doesn't have proc/self, but we can handle it here + if root == procPathForFd(rootFileDescriptor) { + // Take ownership to ensure it's closed; no need to leak + // this afterwards. + rootFd := os.NewFile(rootFileDescriptor, "tar-root") + defer rootFd.Close() + if err := unix.Fchdir(int(rootFd.Fd())); err != nil { + fatal(err) + } + root = "." 
+ } else if root == "" { root = dst } @@ -57,11 +92,35 @@ func untar() { os.Exit(0) } -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { +// newUnpackDestination takes a root directory and a destination which +// must be underneath it, and returns an object that can unpack +// in the target root using a file descriptor. +func newUnpackDestination(root, dest string) (*unpackDestination, error) { if root == "" { - return errors.New("must specify a root to chroot to") + return nil, errors.New("must specify a root to chroot to") + } + relDest, err := filepath.Rel(root, dest) + if err != nil { + return nil, err + } + if relDest == "." { + relDest = "/" } + if relDest[0] != '/' { + relDest = "/" + relDest + } + + rootfdRaw, err := unix.Open(root, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, &fs.PathError{Op: "open", Path: root, Err: err} + } + return &unpackDestination{ + root: os.NewFile(uintptr(rootfdRaw), "rootfs"), + dest: relDest, + }, nil +} +func invokeUnpack(decompressedArchive io.Reader, dest *unpackDestination, options *archive.TarOptions) error { // We can't pass a potentially large exclude list directly via cmd line // because we easily overrun the kernel's max argument/environment size // when the full image list is passed (e.g. when this is used by @@ -72,24 +131,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T return fmt.Errorf("untar pipe failure: %w", err) } - if root != "" { - relDest, err := filepath.Rel(root, dest) - if err != nil { - return err - } - if relDest == "." { - relDest = "/" - } - if relDest[0] != '/' { - relDest = "/" + relDest - } - dest = relDest - } - - cmd := reexec.Command("storage-untar", dest, root) + cmd := reexec.Command("storage-untar", dest.dest, procPathForFd(rootFileDescriptor)) cmd.Stdin = decompressedArchive - cmd.ExtraFiles = append(cmd.ExtraFiles, r) + // If you change this, change tarOptionsDescriptor above + cmd.ExtraFiles = append(cmd.ExtraFiles, r) // fd 3 + // If you change this, change rootFileDescriptor above too + cmd.ExtraFiles = append(cmd.ExtraFiles, dest.root) // fd 4 output := bytes.NewBuffer(nil) cmd.Stdout = output cmd.Stderr = output diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go index 745502204..6611cbade 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -7,19 +7,34 @@ import ( "github.com/containers/storage/pkg/longpath" ) +type unpackDestination struct { + dest string +} + +func (dst *unpackDestination) Close() error { + return nil +} + +// newUnpackDestination is a no-op on this platform +func newUnpackDestination(root, dest string) (*unpackDestination, error) { + return &unpackDestination{ + dest: dest, + }, nil +} + // chroot is not supported by Windows func chroot(path string) error { return nil } func invokeUnpack(decompressedArchive io.Reader, - dest string, - options *archive.TarOptions, root string, + dest *unpackDestination, + options *archive.TarOptions, ) error { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the unpack. We call inline instead within the daemon process. 
- return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest.dest), options) } func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index 5b8acdaba..3ca99a2c2 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -8,7 +8,7 @@ import ( "path/filepath" "github.com/containers/storage/pkg/mount" - "github.com/syndtr/gocapability/capability" + "github.com/moby/sys/capability" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go index b03e97460..0aab11d22 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !linux && !darwin -// +build !windows,!linux,!darwin package chrootarchive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index cdcd9fdc3..ee215ce4f 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin -// +build !windows,!darwin package chrootarchive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go index 274a946e2..0fd96190a 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin -// +build !windows,!darwin package chrootarchive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go index 40eec4dc0..2245f894e 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin -// +build !windows,!darwin package chrootarchive diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index d49ddfed0..47742b05d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -65,11 +65,10 @@ type layer struct { } type layersCache struct { - layers []*layer - refs int - store storage.Store - mutex sync.RWMutex - created time.Time + layers []*layer + refs int + store storage.Store + mutex sync.RWMutex } var ( @@ -83,6 +82,7 @@ func (c *layer) release() { if err := unix.Munmap(c.mmapBuffer); err != nil { logrus.Warnf("Error Munmap: layer %q: %v", c.id, err) } + c.mmapBuffer = nil } } @@ -107,14 +107,13 @@ func (c *layersCache) release() { func getLayersCacheRef(store storage.Store) *layersCache { cacheMutex.Lock() defer cacheMutex.Unlock() - if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + if cache != nil && 
cache.store == store { cache.refs++ return cache } - cache := &layersCache{ - store: store, - refs: 1, - created: time.Now(), + cache = &layersCache{ + store: store, + refs: 1, } return cache } @@ -183,6 +182,9 @@ func makeBinaryDigest(stringDigest string) ([]byte, error) { return buf, nil } +// loadLayerCache attempts to load the cache file for the specified layer. +// If the cache file is not present or it is using a different cache file version, then +// the function returns (nil, nil). func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) { buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey) if err != nil && !errors.Is(err, os.ErrNotExist) { @@ -203,6 +205,9 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) { if err != nil { return nil, err } + if cacheFile == nil { + return nil, nil + } return c.createLayer(layerID, cacheFile, mmapBuffer) } @@ -269,7 +274,7 @@ func (c *layersCache) load() error { var newLayers []*layer for _, r := range allLayers { // The layer is present in the store and it is already loaded. Attempt to - // re-use it if mmap'ed. + // reuse it if mmap'ed. if l, found := loadedLayers[r.ID]; found { // If the layer is not marked for re-load, move it to newLayers. if !l.reloadWithMmap { @@ -289,14 +294,16 @@ func (c *layersCache) load() error { } if r.ReadOnly { - // if the layer is coming from a read-only store, do not attempt + // If the layer is coming from a read-only store, do not attempt // to write to it. + // Therefore, we won’t find any matches in read-only-store layers, + // unless the read-only store layer comes prepopulated with cacheKey data. continue } // the cache file is either not present or broken. Try to generate it from the TOC. l, err = c.createCacheFileFromTOC(r.ID) - if err != nil { + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err) } if l != nil { @@ -617,6 +624,8 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin }, nil } +// readCacheFileFromMemory reads a cache file from a buffer. +// It can return (nil, nil) if the cache file uses a different file version than the one currently supported.
func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { bigData := bytes.NewReader(bigDataBuffer) @@ -779,14 +788,14 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, return "", "", -1, nil } + c.mutex.RLock() + defer c.mutex.RUnlock() + binaryDigest, err := makeBinaryDigest(digest) if err != nil { return "", "", 0, err } - c.mutex.RLock() - defer c.mutex.RUnlock() - for _, layer := range c.layers { if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) { continue diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 633740a28..2dac46354 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -2,6 +2,7 @@ package chunked import ( archivetar "archive/tar" + "bytes" "errors" "fmt" "io" @@ -14,6 +15,8 @@ import ( "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" expMaps "golang.org/x/exp/maps" ) @@ -136,7 +139,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, } // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. -// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset). +// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset). func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) { offsetMetadata := annotations[internal.ManifestInfoKey] if offsetMetadata == "" { @@ -211,7 +214,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err) } - decodedTarSplit := []byte{} + var decodedTarSplit []byte = nil if toc.TarSplitDigest != "" { if tarSplitChunk.Offset <= 0 { return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey) @@ -256,7 +259,8 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { } } - if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error { + unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit)) + if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error { e, ok := pendingFiles[hdr.Name] if !ok { return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name) @@ -284,6 +288,36 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { return nil } +// tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata +func tarSizeFromTarSplit(tarSplit []byte) (int64, error) { + var res int64 = 0 + + unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit)) + for { + entry, err := unpacker.Next() + if err != nil { + if err == io.EOF { + break + } + return -1, fmt.Errorf("reading tar-split entries: %w", err) + } + switch entry.Type { + case storage.SegmentType: + res += int64(len(entry.Payload)) + case storage.FileType: + // entry.Size is the “logical size”, which might not be the physical size for sparse entries; + // but the way tar-split/tar/asm.WriteOutputTarStream combines FileType entries and returned files contents, + // sparse files are not supported. 
+ // Also https://github.com/opencontainers/image-spec/blob/main/layer.md says + // > Sparse files SHOULD NOT be used because they lack consistent support across tar implementations. + res += entry.Size + default: + return -1, fmt.Errorf("unexpected tar-split entry type %q", entry.Type) + } + } + return res, nil +} + // ensureTimePointersMatch ensures that a and b are equal func ensureTimePointersMatch(a, b *time.Time) error { // We didn’t always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil. diff --git a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go index 4fc4864a2..e26e5e5c5 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go @@ -174,7 +174,7 @@ func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMeta if usePath { dirName := filepath.Dir(metadata.Name) if dirName != "" { - parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY, 0) + parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { return err } @@ -402,7 +402,7 @@ func openFileUnderRoot(dirfd int, name string, flags uint64, mode os.FileMode) ( // name is the path to open relative to dirfd. // mode specifies the mode to use for newly created files. func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.File, error) { - fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, 0) + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0) if err == nil { return os.NewFile(uintptr(fd), name), nil } @@ -422,7 +422,7 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil return nil, &fs.PathError{Op: "mkdirat", Path: name, Err: err2} } - fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, 0) + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0) if err == nil { return os.NewFile(uintptr(fd), name), nil } @@ -465,7 +465,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, } } - file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY, 0) + file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { return err } @@ -475,7 +475,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, } func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { - sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) + sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go index 752ee2520..cc9ee85bf 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go @@ -23,3 +23,22 @@ type ErrBadRequest struct { //nolint: errname func (e ErrBadRequest) Error() string { return "bad request" } + +// ErrFallbackToOrdinaryLayerDownload is a custom error type that +// suggests to the caller that a 
fallback mechanism can be used + instead of a hard failure. +type ErrFallbackToOrdinaryLayerDownload struct { + Err error +} + +func (c ErrFallbackToOrdinaryLayerDownload) Error() string { + return c.Err.Error() +} + +func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error { + return c.Err +} + +func newErrFallbackToOrdinaryLayerDownload(err error) error { + return ErrFallbackToOrdinaryLayerDownload{Err: err} +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 403d7d5aa..8ecbfb982 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -89,7 +89,8 @@ type chunkedDiffer struct { // is no TOC referenced by the manifest. blobDigest digest.Digest - blobSize int64 + blobSize int64 + uncompressedTarSize int64 // -1 if unknown pullOptions map[string]string @@ -143,43 +144,98 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o } // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer. +// If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can +// retry the operation with a different method. func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { pullOptions := store.PullOptions() - if !parseBooleanPullOption(pullOptions, "enable_partial_images", true) { - return nil, errors.New("enable_partial_images not configured") + if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) { + // If convertImages is set, the two options disagree whether fallback is permissible. + // Right now, we enable it, but that’s not a promise; rather, such a configuration should ideally be rejected. + return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled")) } + // convertImages also serves as a “must not fall back to non-partial pull” option (?!) + convertImages := parseBooleanPullOption(pullOptions, "convert_images", false) + graphDriver, err := store.GraphDriver() + if err != nil { + return nil, err + } + if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported { + if convertImages { + return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String()) + } + return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String())) + } + + differ, canFallback, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions) + if err != nil { + if !canFallback { + return nil, err + } + // If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method. + if convertImages { + logrus.Debugf("Created differ to convert blob %q", blobDigest) + return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions) + } + return nil, newErrFallbackToOrdinaryLayerDownload(err) + } + + return differ, nil +} + +// getProperDiffer is an implementation detail of GetDiffer. +// It returns a “proper” differ (not a convert_images one) if possible. +// On error, the second parameter is true if a fallback to an alternative (either the makeConvertFromRawDiffer differ, or a non-partial pull) +// is permissible.
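To make the fallback contract above concrete, here is a hedged sketch of how a caller of the exported chunked.GetDiffer is expected to honor ErrFallbackToOrdinaryLayerDownload. Only the errors.As pattern is taken from this diff; `applyOrFallback` and `fetchFull` are hypothetical names for an ordinary, non-partial layer download path:

```go
package pullsketch

import (
	"context"
	"errors"

	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/chunked"
	digest "github.com/opencontainers/go-digest"
)

// applyOrFallback is a sketch only; fetchFull stands in for a full-layer pull.
func applyOrFallback(ctx context.Context, store storage.Store, dgst digest.Digest, size int64,
	annotations map[string]string, iss chunked.ImageSourceSeekable, fetchFull func() error) (graphdriver.Differ, error) {
	differ, err := chunked.GetDiffer(ctx, store, dgst, size, annotations, iss)
	if err != nil {
		var fallback chunked.ErrFallbackToOrdinaryLayerDownload
		if errors.As(err, &fallback) {
			// Partial pull is not possible here, but a full download is still allowed.
			return nil, fetchFull()
		}
		return nil, err // hard failure: do not retry with another method
	}
	return differ, nil // hand off to ApplyDiffWithDiffer in real code
}
```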
+func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) { zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] - if hasZstdChunkedTOC && hasEstargzTOC { - return nil, errors.New("both zstd:chunked and eStargz TOC found") - } + switch { + case hasZstdChunkedTOC && hasEstargzTOC: + return nil, false, errors.New("both zstd:chunked and eStargz TOC found") - if hasZstdChunkedTOC { + case hasZstdChunkedTOC: zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString) if err != nil { - return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err) + return nil, false, err } - return makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions) - } - if hasEstargzTOC { + differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions) + if err != nil { + logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err) + // If the error is a bad request to the server, then signal to the caller that it can try a different method. + var badRequestErr ErrBadRequest + return nil, errors.As(err, &badRequestErr), err + } + logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest) + return differ, false, nil + + case hasEstargzTOC: estargzTOCDigest, err := digest.Parse(estargzTOCDigestString) if err != nil { - return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err) + return nil, false, err } - return makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions) - } + differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions) + if err != nil { + logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err) + // If the error is a bad request to the server, then signal to the caller that it can try a different method. 
+ var badRequestErr ErrBadRequest + return nil, errors.As(err, &badRequestErr), err + } + logrus.Debugf("Created eStargz differ for blob %q", blobDigest) + return differ, false, nil - return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions) + default: // no TOC + convertImages := parseBooleanPullOption(pullOptions, "convert_images", false) + if !convertImages { + return nil, true, errors.New("no TOC found and convert_images is not configured") + } + return nil, true, errors.New("no TOC found") + } } func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) { - if !parseBooleanPullOption(pullOptions, "convert_images", false) { - return nil, errors.New("convert_images not configured") - } - layersCache, err := getLayersCache(store) if err != nil { return nil, err @@ -189,6 +245,7 @@ func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blo fsVerityDigests: make(map[string]string), blobDigest: blobDigest, blobSize: blobSize, + uncompressedTarSize: -1, // Will be computed later convertToZstdChunked: true, copyBuffer: makeCopyBuffer(), layersCache: layersCache, @@ -202,24 +259,33 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } + var uncompressedTarSize int64 = -1 + if tarSplit != nil { + uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit) + if err != nil { + return nil, fmt.Errorf("computing size from tar-split: %w", err) + } + } + layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ - fsVerityDigests: make(map[string]string), - blobSize: blobSize, - tocDigest: tocDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeZstdChunked, - layersCache: layersCache, - manifest: manifest, - toc: toc, - pullOptions: pullOptions, - stream: iss, - tarSplit: tarSplit, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + uncompressedTarSize: uncompressedTarSize, + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeZstdChunked, + layersCache: layersCache, + manifest: manifest, + toc: toc, + pullOptions: pullOptions, + stream: iss, + tarSplit: tarSplit, + tocOffset: tocOffset, }, nil } @@ -234,16 +300,17 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig } return &chunkedDiffer{ - fsVerityDigests: make(map[string]string), - blobSize: blobSize, - tocDigest: tocDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeEstargz, - layersCache: layersCache, - manifest: manifest, - pullOptions: pullOptions, - stream: iss, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + uncompressedTarSize: -1, // We would have to read and decompress the whole layer + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeEstargz, + layersCache: layersCache, + manifest: manifest, + pullOptions: pullOptions, + stream: iss, + tocOffset: tocOffset, }, nil } @@ -947,11 +1014,9 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd i } if _, ok := err.(ErrBadRequest); ok { - // If the server cannot handle at least 64 chunks in a single request, just give up. 
- if len(chunksToRequest) < 64 { + if len(chunksToRequest) == 1 { return err } - // Merge more chunks to request missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2) calculateChunksToRequest() @@ -1102,6 +1167,7 @@ func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, if payload == nil { return "", errors.New("invalid stream returned") } + defer payload.Close() originalRawDigester := digest.Canonical.Digester() @@ -1128,7 +1194,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff var compressedDigest digest.Digest var uncompressedDigest digest.Digest - var convertedBlobSize int64 if c.convertToZstdChunked { fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) @@ -1160,7 +1225,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err != nil { return graphdriver.DriverWithDifferOutput{}, err } - convertedBlobSize = tarSize + c.uncompressedTarSize = tarSize // fileSource is a O_TMPFILE file descriptor, so we // need to keep it open until the entire file is processed. defer fileSource.Close() @@ -1230,6 +1295,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff TOCDigest: c.tocDigest, UncompressedDigest: uncompressedDigest, CompressedDigest: compressedDigest, + Size: c.uncompressedTarSize, } // When the hard links deduplication is used, file attributes are ignored because setting them @@ -1243,19 +1309,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff var missingParts []missingPart - mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries) + mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { return output, err } output.UIDs, output.GIDs = collectIDs(mergedEntries) - if convertedBlobSize > 0 { - // if the image was converted, store the original tar size, so that - // it can be recreated correctly. 
- output.Size = convertedBlobSize - } else { - output.Size = totalSizeFromTOC - } if err := maybeDoIDRemap(mergedEntries, options); err != nil { return output, err @@ -1331,7 +1390,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff wg.Wait() }() - for i := 0; i < copyGoRoutines; i++ { + for range copyGoRoutines { wg.Add(1) jobs := copyFileJobs @@ -1572,9 +1631,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { return false } -func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) { - var totalFilesSize int64 - +func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) { countNextChunks := func(start int) int { count := 0 for _, e := range entries[start:] { @@ -1604,10 +1661,8 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i continue } - totalFilesSize += e.Size - if e.Type == TypeChunk { - return nil, -1, fmt.Errorf("chunk type without a regular file") + return nil, fmt.Errorf("chunk type without a regular file") } if e.Type == TypeReg { @@ -1643,7 +1698,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i lastChunkOffset = mergedEntries[i].chunks[j].Offset } } - return mergedEntries, totalFilesSize, nil + return mergedEntries, nil } // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index ac6bdfec7..7be0adeb8 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package chunked diff --git a/vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go b/vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go deleted file mode 100644 index aeb1698db..000000000 --- a/vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go +++ /dev/null @@ -1,68 +0,0 @@ -package chunked - -import ( - "bytes" - "fmt" - "io" - - "github.com/vbatts/tar-split/archive/tar" - "github.com/vbatts/tar-split/tar/storage" -) - -// iterateTarSplit calls handler for each tar header in tarSplit -func iterateTarSplit(tarSplit []byte, handler func(hdr *tar.Header) error) error { - // This, strictly speaking, hard-codes undocumented assumptions about how github.com/vbatts/tar-split/tar/asm.NewInputTarStream - // forms the tar-split contents. Pragmatically, NewInputTarStream should always produce storage.FileType entries at least - // for every non-empty file, which constraints it basically to the output we expect. - // - // Specifically, we assume: - // - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions - // - (There is a FileType entry for every tar header, we ignore it) - // - Trailing padding of a file, if any, is included in the next SegmentType entry - // - At the end, there may be SegmentType entries just for the terminating zero blocks. 
- - unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit)) - for { - tsEntry, err := unpacker.Next() - if err != nil { - if err == io.EOF { - return nil - } - return fmt.Errorf("reading tar-split entries: %w", err) - } - switch tsEntry.Type { - case storage.SegmentType: - payload := tsEntry.Payload - // This is horrible, but we don’t know how much padding to skip. (It can be computed from the previous hdr.Size for non-sparse - // files, but for sparse files that is set to the logical size.) - // - // First, assume that all padding is zero bytes. - // A tar header starts with a file name, which might in principle be empty, but - // at least https://github.com/opencontainers/image-spec/blob/main/layer.md#populate-initial-filesystem suggests that - // the tar name should never be empty (it should be ".", or maybe "./"). - // - // This will cause us to skip all zero bytes in the trailing blocks, but that’s fine. - i := 0 - for i < len(payload) && payload[i] == 0 { - i++ - } - payload = payload[i:] - tr := tar.NewReader(bytes.NewReader(payload)) - hdr, err := tr.Next() - if err != nil { - if err == io.EOF { // Probably the last entry, but let’s let the unpacker drive that. - break - } - return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err) - } - if err := handler(hdr); err != nil { - return err - } - - case storage.FileType: - // Nothing - default: - return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type) - } - } -} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index dd6c02a77..08060f2a2 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,11 +1,10 @@ //go:build !windows -// +build !windows package directory import ( + "errors" "io/fs" - "os" "path/filepath" "syscall" ) @@ -27,7 +26,7 @@ func Usage(dir string) (usage *DiskUsage, err error) { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. - if os.IsNotExist(err) && d != dir { + if errors.Is(err, fs.ErrNotExist) && d != dir { return nil } return err @@ -35,6 +34,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { fileInfo, err := entry.Info() if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go index 482bc51a2..c2145c26f 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -1,11 +1,10 @@ //go:build windows -// +build windows package directory import ( + "errors" "io/fs" - "os" "path/filepath" ) @@ -25,7 +24,7 @@ func Usage(dir string) (usage *DiskUsage, err error) { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. 
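The directory Usage hunks here and just below standardize on errors.Is(err, fs.ErrNotExist), which, unlike the legacy os.IsNotExist, also matches wrapped errors; they additionally tolerate entries deleted between the directory read and the Info call. A self-contained sketch of the resulting pattern (`usageSize` is illustrative, not the vendored function):

```go
package dirsketch

import (
	"errors"
	"io/fs"
	"path/filepath"
)

// usageSize sums file sizes under dir, skipping entries that vanish mid-walk.
func usageSize(dir string) (int64, error) {
	var total int64
	err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) && path != dir {
				return nil // entry vanished while walking; ignore it
			}
			return err
		}
		info, err := d.Info()
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				return nil // raced with deletion between readdir and stat
			}
			return err
		}
		total += info.Size()
		return nil
	})
	return total, err
}
```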
- if os.IsNotExist(err) && path != dir { + if errors.Is(err, fs.ErrNotExist) && path != dir { return nil } return err @@ -40,6 +39,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { fileInfo, err := d.Info() if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } usage.Size += fileInfo.Size() diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go new file mode 100644 index 000000000..eeecc9f75 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go @@ -0,0 +1,38 @@ +package fileutils + +import ( + "errors" + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// Exists checks whether a file or directory exists at the given path. +// If the path is a symlink, the symlink is followed. +func Exists(path string) error { + // It uses unix.Faccessat which is a faster operation compared to os.Stat for + // simply checking the existence of a file. + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) + if err != nil { + return &os.PathError{Op: "faccessat", Path: path, Err: err} + } + return nil +} + +// Lexists checks whether a file or directory exists at the given path. +// If the path is a symlink, the symlink itself is checked. +func Lexists(path string) error { + // FreeBSD before 15.0 does not support the AT_SYMLINK_NOFOLLOW flag for + // faccessat. In this case, the call to faccessat will return EINVAL and + // we fall back to using Lstat. + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + if errors.Is(err, syscall.EINVAL) { + _, err = os.Lstat(path) + return err + } + return &os.PathError{Op: "faccessat", Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go index f3087d7df..785b13317 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go @@ -1,5 +1,4 @@ -//go:build !windows -// +build !windows +//go:build !windows && !freebsd package fileutils diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go index 92e0263d8..3cb250c5a 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package fileutils diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go index 9854cac1c..6b9a853b7 100644 --- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package fsutils diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go index 46e68c578..80b9171db 100644 --- a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package fsverity diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go 
b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 45be87659..f351b48bb 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package homedir diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go index 35b93e626..db31da768 100644 --- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go @@ -1,10 +1,11 @@ //go:build linux -// +build linux package idmap import ( + "errors" "fmt" + "io/fs" "os" "runtime" "syscall" @@ -26,7 +27,7 @@ func CreateIDMappedMount(source, target string, pid int) error { targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE) if err != nil { - return err + return &os.PathError{Op: "open_tree", Path: source, Err: err} } defer unix.Close(targetDirFd) @@ -35,13 +36,16 @@ func CreateIDMappedMount(source, target string, pid int) error { Attr_set: unix.MOUNT_ATTR_IDMAP, Userns_fd: uint64(userNsFile.Fd()), }); err != nil { - return err + return &os.PathError{Op: "mount_setattr", Path: source, Err: err} } - if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) { + if err := os.Mkdir(target, 0o700); err != nil && !errors.Is(err, fs.ErrExist) { return err } - return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH) + if err := unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil { + return &os.PathError{Op: "move_mount", Path: target, Err: err} + } + return nil } // CreateUsernsProcess forks the current process and creates a user namespace using the specified diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go index 81c6072aa..d58137b99 100644 --- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package idmap diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index dc963481a..248594e93 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -367,7 +367,7 @@ func checkChownErr(err error, name string, uid, gid int) error { return err } -// Stat contains file states that can be overriden with ContainersOverrideXattr. +// Stat contains file states that can be overridden with ContainersOverrideXattr. 
type Stat struct { IDs IDPair Mode os.FileMode diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index 03e787376..2bd26d0e3 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -1,5 +1,4 @@ //go:build linux && cgo && libsubid -// +build linux,cgo,libsubid package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 7900af38a..1da7dadbf 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go index 78141fb85..e6f5c1ba6 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux || !libsubid || !cgo -// +build !linux !libsubid !cgo package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go index dc69c6076..ec6a3a046 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go index 15bd98ede..e37c4540c 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go index b3772bdb3..f34462a23 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go index aec161e0f..2ccdc3108 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package ioutils diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go index 9d5af610e..257b064c5 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package ioutils diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go index 2c2242d69..79837fb33 100644 --- 
a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package ioutils diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index 25a71ac90..52f6c0a62 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -128,9 +128,8 @@ func GetROLockfile(path string) (Locker, error) { func (l *LockFile) Lock() { if l.ro { panic("can't take write lock on read-only lock file") - } else { - l.lock(writeLock) } + l.lock(writeLock) } // RLock locks the lockfile as a reader. @@ -142,9 +141,8 @@ func (l *LockFile) RLock() { func (l *LockFile) TryLock() error { if l.ro { panic("can't take write lock on read-only lock file") - } else { - return l.tryLock(writeLock) } + return l.tryLock(writeLock) } // TryRLock attempts to lock the lockfile as a reader. diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 6c8399f9e..885f2f88a 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package lockfile diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index 6482529b3..0cc1c50cc 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package lockfile diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index 067dd7cd9..4ad634517 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -1,5 +1,4 @@ //go:build linux && cgo -// +build linux,cgo package loopback @@ -41,11 +40,10 @@ func getNextFreeLoopbackIndex() (int, error) { return index, err } -func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { +func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (*os.File, error) { // Read information about the loopback file. var st syscall.Stat_t - err = syscall.Fstat(int(sparseFile.Fd()), &st) - if err != nil { + if err := syscall.Fstat(int(sparseFile.Fd()), &st); err != nil { logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err) return nil, ErrAttachLoopbackDevice } @@ -70,7 +68,7 @@ func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (loopFile target := fmt.Sprintf("/dev/loop%d", index) // OpenFile adds O_CLOEXEC - loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644) + loopFile, err := os.OpenFile(target, os.O_RDWR, 0o644) if err != nil { // The kernel returns ENXIO when opening a device that is in the "deleting" or "rundown" state, so // just treat ENXIO as if the device does not exist. 
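The next hunk inverts the EBUSY check so the probe loop retries only when another process grabbed the candidate device first, returns as soon as one attach succeeds, and fails hard on any other error. Condensed into a self-contained sketch (`attachFirstFree` and `try` are illustrative names, not the vendored API):

```go
package loopsketch

import (
	"errors"
	"fmt"
	"syscall"
)

// attachFirstFree probes successive device indexes, retrying only on EBUSY.
func attachFirstFree(maxProbes int, try func(index int) error) (int, error) {
	for i := 0; i < maxProbes; i++ {
		switch err := try(i); {
		case err == nil:
			return i, nil // attached; stop probing
		case errors.Is(err, syscall.EBUSY):
			continue // device taken concurrently; try the next index
		default:
			return -1, err
		}
	}
	return -1, fmt.Errorf("no free device after %d probes", maxProbes)
}
```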
@@ -100,13 +98,12 @@ func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (loopFile loopFile.Close() // If the error is EBUSY, then try the next loopback - if err != syscall.EBUSY { - logrus.Errorf("Cannot set up loopback device %s: %s", target, err) - return nil, ErrAttachLoopbackDevice + if err == syscall.EBUSY { + continue } - // Otherwise, we keep going with the loop - continue + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice } // Check if the loopback driver and underlying filesystem agree on the loopback file's @@ -119,18 +116,8 @@ func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (loopFile if dev != uint64(st.Dev) || ino != st.Ino { logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) } - - // In case of success, we finished. Break the loop. - break + return loopFile, nil } - - // This can't happen, but let's be sure - if loopFile == nil { - logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil } // AttachLoopDevice attaches the given sparse file to the next diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go index da2ba46fe..cf77e5232 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go @@ -1,5 +1,4 @@ //go:build linux && cgo -// +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go index 21a981007..2a4f5cbcf 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go @@ -1,5 +1,4 @@ //go:build linux && cgo -// +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go index ec4287247..2d8821279 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -1,5 +1,4 @@ //go:build linux && cgo -// +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go index 5de3a671d..40a229932 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -97,14 +97,14 @@ func MergeTmpfsOptions(options []string) ([]string, error) { } continue } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { + opt, _, ok := strings.Cut(option, "=") + if !ok || !validFlags[opt] { return nil, fmt.Errorf("invalid tmpfs option %q", opt) } - if !dataCollisions[opt[0]] { + if !dataCollisions[opt] { // We prepend the option and add to collision map newOptions = append([]string{option}, newOptions...) 
- dataCollisions[opt[0]] = true + dataCollisions[opt] = true } } @@ -140,8 +140,8 @@ func ParseOptions(options string) (int, string) { func ParseTmpfsOptions(options string) (int, string, error) { flags, data := ParseOptions(options) for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { + opt, _, _ := strings.Cut(o, "=") + if !validFlags[opt] { return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) } } diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index ee0f593a5..e581d64eb 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index c70b0bf99..61d6d1c59 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd && cgo -// +build freebsd,cgo package mount @@ -40,13 +39,9 @@ func mount(device, target, mType string, flag uintptr, data string) error { isNullFS = true continue } - opt := strings.SplitN(x, "=", 2) - options = append(options, opt[0]) - if len(opt) == 2 { - options = append(options, opt[1]) - } else { - options = append(options, "") - } + name, val, _ := strings.Cut(x, "=") + options = append(options, name) + options = append(options, val) } } diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go index 74fe66609..b9dc82d3f 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -1,6 +1,4 @@ //go:build !linux && !(freebsd && cgo) -// +build !linux -// +build !freebsd !cgo package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go index a2a1d4072..331272e0c 100644 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package mount @@ -11,7 +10,7 @@ import ( func unmount(target string, flags int) error { var err error - for i := 0; i < 50; i++ { + for range 50 { err = unix.Unmount(target, flags) switch err { case unix.EBUSY: diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go index d3a0cf51c..3c942bfb2 100644 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package mount diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go index 3fb0c36b8..7b20b0628 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go +++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go @@ -11,11 +11,11 @@ import ( // ParseKeyValueOpt parses and validates the specified string as a key/value pair 
(key=value) func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { + k, v, ok := strings.Cut(opt, "=") + if !ok { return "", "", fmt.Errorf("unable to parse key/value option: %s", opt) } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil + return strings.TrimSpace(k), strings.TrimSpace(v), nil } // ParseUintList parses and validates the specified string as the value @@ -42,19 +42,19 @@ func ParseUintList(val string) (map[int]bool, error) { errInvalidFormat := fmt.Errorf("invalid format: %s", val) for _, r := range split { - if !strings.Contains(r, "-") { + minS, maxS, ok := strings.Cut(r, "-") + if !ok { v, err := strconv.Atoi(r) if err != nil { return nil, errInvalidFormat } availableInts[v] = true } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) + min, err := strconv.Atoi(minS) if err != nil { return nil, errInvalidFormat } - max, err := strconv.Atoi(split[1]) + max, err := strconv.Atoi(maxS) if err != nil { return nil, errInvalidFormat } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go index 32d6a9f49..171cd81e7 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package reexec diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 87b43ed95..025aef60a 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package reexec diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index a56ada216..eefddea41 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,5 +1,4 @@ //go:build solaris || darwin -// +build solaris darwin package reexec diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 77c93b4ab..a78b548a5 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !windows && !freebsd && !solaris && !darwin -// +build !linux,!windows,!freebsd,!solaris,!darwin package reexec diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index c46125ebf..ba2f0f847 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package reexec diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go index 834dd9433..ccd9d0fb1 100644 --- a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go @@ -1,5 +1,4 @@ //go:build !regexp_precompile 
-// +build !regexp_precompile package regexp diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go index a5fe0dbc4..fe4421b01 100644 --- a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go @@ -1,5 +1,4 @@ //go:build regexp_precompile -// +build regexp_precompile package regexp diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go index 66a59c85d..f63c3e444 100644 --- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -3,7 +3,7 @@ package stringutils import ( "bytes" - "math/rand" + "math/rand/v2" "strings" ) @@ -13,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string { letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]byte, n) for i := range b { - b[i] = letters[rand.Intn(len(letters))] + b[i] = letters[rand.IntN(len(letters))] } return string(b) } @@ -25,7 +25,7 @@ func GenerateRandomASCIIString(n int) string { "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " res := make([]byte, n) for i := 0; i < n; i++ { - res[i] = chars[rand.Intn(len(chars))] + res[i] = chars[rand.IntN(len(chars))] } return string(res) } diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go index 1ce4c0d6e..892d56138 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go index 7a3d7937d..f0d744eb8 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go index 4eaeb5d69..f9de938dd 100644 --- a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go +++ b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go index 42658c8b9..037ccf59d 100644 --- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go index 9b13e6146..826c1f9c3 100644 --- a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go index 37da93aa0..589cbeba7 100644 
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd && cgo -// +build freebsd,cgo package system diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go index a90b23e03..17474e114 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -1,5 +1,4 @@ //go:build solaris && cgo -// +build solaris,cgo package system diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go index 0f9feb1d2..db0864275 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go @@ -1,8 +1,4 @@ //go:build !linux && !windows && !solaris && !(freebsd && cgo) -// +build !linux -// +build !windows -// +build !solaris -// +build !freebsd !cgo package system diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go index d3d0ed8a1..ff679c5b1 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -1,5 +1,4 @@ //go:build !windows && !freebsd -// +build !windows,!freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go index 53c3f2837..d94353600 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go index c35b1b346..752f90b14 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go index ff01143ee..fc8de3e4d 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go index 9f2509738..8838d9fd2 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go index 7ee59d926..5090f3042 100644 --- a/vendor/github.com/containers/storage/pkg/system/process_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || solaris || darwin -// +build linux freebsd solaris darwin package system diff --git 
a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go index 117eb1d6d..db214c4cd 100644 --- a/vendor/github.com/containers/storage/pkg/system/rm_common.go +++ b/vendor/github.com/containers/storage/pkg/system/rm_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go index 2f44d18b6..1d57b7f40 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_common.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go index e552e91d7..ffe45f32d 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go index 217e2fe83..d1b41f34d 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go index ad0337db7..9b02a1887 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask.go +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go index 9497596a0..c0b69ab1b 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go index 843ecdc53..b6c36339d 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index 8bd7acf1f..0ab61f3de 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin -// +build !linux,!darwin package system diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go index 08dbc661d..14aaeddcf 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go @@ -1,5 +1,4 @@ //go:build linux && cgo -// +build linux,cgo package unshare diff --git 
a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go index 25054810a..f970935b5 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go @@ -1,5 +1,4 @@ //go:build linux && !cgo -// +build linux,!cgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go index fbfb90d59..f575fba2e 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go @@ -1,5 +1,4 @@ //go:build (linux && cgo && !gccgo) || (freebsd && cgo) -// +build linux,cgo,!gccgo freebsd,cgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go index 480e2fcb0..5d0a7a683 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go index 7a44ca301..37a87fa5b 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go index 21a43d38c..818983474 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go @@ -1,5 +1,4 @@ //go:build linux && cgo && gccgo -// +build linux,cgo,gccgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index 32e8d7dca..b45a6819a 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package unshare @@ -21,9 +20,9 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" + "github.com/moby/sys/capability" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "github.com/syndtr/gocapability/capability" ) // Cmd wraps an exec.Cmd created by the reexec package in unshare(), and diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go index e3160d0da..05706b8fe 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin -// +build !linux,!darwin package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go index a6b38eda8..ae2869d74 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go +++ 
b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go @@ -1,5 +1,4 @@ //go:build cgo && !(linux || freebsd) -// +build cgo,!linux,!freebsd package unshare diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 94a9b36d4..962325233 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -8,12 +8,12 @@ # /usr/containers/storage.conf # /etc/containers/storage.conf # $HOME/.config/containers/storage.conf -# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) +# $XDG_CONFIG_HOME/containers/storage.conf (if XDG_CONFIG_HOME is set) # See man 5 containers-storage.conf for more information -# The "container storage" table contains all of the server options. +# The "storage" table contains all of the server options. [storage] -# Default Storage Driver, Must be set for proper operation. +# Default storage driver, must be set for proper operation. driver = "overlay" # Temporary storage location @@ -24,8 +24,8 @@ runroot = "/run/containers/storage" # driver_priority = ["overlay", "btrfs"] # Primary Read/Write location of container storage -# When changing the graphroot location on an SELINUX system, you must -# ensure the labeling matches the default locations labels with the +# When changing the graphroot location on an SELinux system, you must +# ensure the labeling matches the default location's labels with the # following commands: # semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH # restorecon -R -v /NEWSTORAGEPATH @@ -54,32 +54,31 @@ graphroot = "/var/lib/containers/storage" additionalimagestores = [ ] -# Allows specification of how storage is populated when pulling images. This -# option can speed the pulling process of images compressed with format -# zstd:chunked. Containers/storage looks for files within images that are being -# pulled from a container registry that were previously pulled to the host. It -# can copy or create a hard link to the existing file when it finds them, -# eliminating the need to pull them from the container registry. These options -# can deduplicate pulling of content, disk storage of content and can allow the -# kernel to use less memory when running containers. - -# containers/storage supports four keys -# * enable_partial_images="true" | "false" -# Tells containers/storage to look for files previously pulled in storage -# rather then always pulling them from the container registry. -# * use_hard_links = "false" | "true" -# Tells containers/storage to use hard links rather then create new files in -# the image, if an identical file already existed in storage. -# * ostree_repos = "" -# Tells containers/storage where an ostree repository exists that might have -# previously pulled content which can be used when attempting to avoid -# pulling content from the container registry -# * convert_images = "false" | "true" -# If set to true, containers/storage will convert images to a -# format compatible with partial pulls in order to take advantage -# of local deduplication and hard linking. It is an expensive -# operation so it is not enabled by default. -pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""} +# Options controlling how storage is populated when pulling images. +[storage.options.pull_options] +# Enable the "zstd:chunked" feature, which allows partial pulls, reusing +# content that already exists on the system. 
This is disabled by default, +and must be explicitly enabled to be used. For more on zstd:chunked, see +# https://github.com/containers/storage/blob/main/docs/containers-storage-zstd-chunked.md +# This is a "string bool": "false" | "true" (cannot be native TOML boolean) +# enable_partial_images = "false" + +# Tells containers/storage to use hard links rather than create new files in +# the image, if an identical file already existed in storage. +# This is a "string bool": "false" | "true" (cannot be native TOML boolean) +# use_hard_links = "false" + +# Path to an ostree repository that might have +# previously pulled content which can be used when attempting to avoid +# pulling content from the container registry. +# ostree_repos="" + +# If set to "true", containers/storage will convert images that are +# not already in zstd:chunked format to that format before processing +# in order to take advantage of local deduplication and hard linking. +# It is an expensive operation so it is not enabled by default. +# This is a "string bool": "false" | "true" (cannot be native TOML boolean) +# convert_images = "false" # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID @@ -102,6 +101,7 @@ pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree # squashed down to the default uid in the container. These images will have no # separation between the users in the container. Only supported for the overlay # and vfs drivers. +# This is a "string bool": "false" | "true" (cannot be native TOML boolean) #ignore_chown_errors = "false" # Inodes is used to set a maximum inodes of the container image. @@ -115,9 +115,11 @@ pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree mountopt = "nodev" # Set to skip a PRIVATE bind mount on the storage home directory. +# This is a "string bool": "false" | "true" (cannot be native TOML boolean) # skip_mount_home = "false" # Set to use composefs to mount data layers with overlay. +# This is a "string bool": "false" | "true" (cannot be native TOML boolean) # use_composefs = "false" # Size is used to set a maximum size of the container image. diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index efcecfae8..a2d385008 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -6,9 +6,11 @@ import ( "errors" "fmt" "io" + "maps" "os" "path/filepath" "reflect" + "slices" "strings" "sync" "syscall" @@ -84,6 +86,20 @@ type ApplyStagedLayerOptions struct { DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory } +// MultiListOptions contains options to pass to MultiList +type MultiListOptions struct { + Images bool // if true, Images will be listed in the result + Layers bool // if true, layers will be listed in the result + Containers bool // if true, containers will be listed in the result +} + +// MultiListResult contains slices of Images, Layers or Containers listed by the MultiList method +type MultiListResult struct { + Images []Image + Layers []Layer + Containers []Container +} + // An roBigDataStore wraps up the read-only big-data related methods of the // various types of file-based lookaside stores that we implement. type roBigDataStore interface { @@ -325,11 +341,17 @@ type Store interface { // } ApplyDiff(to string, diff io.Reader) (int64, error) - // ApplyDiffer applies a diff to a layer.
+ // ApplyDiffWithDiffer applies a diff to a layer. // It is the caller responsibility to clean the staging directory if it is not - // successfully applied with ApplyDiffFromStagingDirectory. + // successfully applied with ApplyStagedLayer. + // Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + // PrepareStagedLayer applies a diff to a layer. + // It is the caller responsibility to clean the staging directory if it is not + // successfully applied with ApplyStagedLayer. + PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + // ApplyStagedLayer combines the functions of creating a layer and using the staging // directory to populate it. // It marks the layer for automatic removal if applying the diff fails for any reason. @@ -561,6 +583,12 @@ type Store interface { // usually by deleting layers and images which are damaged. If the // right options are set, it will remove containers as well. Repair(report CheckReport, options *RepairOptions) []error + + // MultiList returns a MultiListResult structure that contains layer, image, or container + // extracts according to the values in MultiListOptions. + // MultiList returns consistent values as of a single point in time. + // WARNING: The values may already be out of date by the time they are returned to the caller. + MultiList(MultiListOptions) (MultiListResult, error) } // AdditionalLayer represents a layer that is contained in the additional layer store @@ -849,8 +877,8 @@ func GetStore(options types.StoreOptions) (Store, error) { graphOptions: options.GraphDriverOptions, imageStoreDir: options.ImageStore, pullOptions: options.PullOptions, - uidMap: copyIDMap(options.UIDMap), - gidMap: copyIDMap(options.GIDMap), + uidMap: copySlicePreferringNil(options.UIDMap), + gidMap: copySlicePreferringNil(options.GIDMap), autoUsernsUser: options.RootAutoNsUser, autoNsMinSize: autoNsMinSize, autoNsMaxSize: autoNsMaxSize, @@ -869,30 +897,6 @@ func GetStore(options types.StoreOptions) (Store, error) { return s, nil } -func copyUint32Slice(slice []uint32) []uint32 { - m := []uint32{} - if slice != nil { - m = make([]uint32, len(slice)) - copy(m, slice) - } - if len(m) > 0 { - return m[:] - } - return nil -} - -func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap { - m := []idtools.IDMap{} - if idmap != nil { - m = make([]idtools.IDMap, len(idmap)) - copy(m, idmap) - } - if len(m) > 0 { - return m[:] - } - return nil -} - func (s *store) RunRoot() string { return s.runRoot } @@ -919,18 +923,16 @@ func (s *store) GraphOptions() []string { func (s *store) PullOptions() map[string]string { cp := make(map[string]string, len(s.pullOptions)) - for k, v := range s.pullOptions { - cp[k] = v - } + maps.Copy(cp, s.pullOptions) return cp } func (s *store) UIDMap() []idtools.IDMap { - return copyIDMap(s.uidMap) + return copySlicePreferringNil(s.uidMap) } func (s *store) GIDMap() []idtools.IDMap { - return copyIDMap(s.gidMap) + return copySlicePreferringNil(s.gidMap) } // This must only be called when constructing store; it writes to fields that are assumed to be constant after construction. 
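The MultiList declaration above pairs with the implementation later in this diff. A minimal sketch of how a caller might drive it; storage.GetStore and MultiList come straight from this vendored package, while types.DefaultStoreOptions() is assumed to resolve usable defaults on the host:

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Resolve the default store options (graph root, run root, driver).
	opts, err := types.DefaultStoreOptions()
	if err != nil {
		panic(err)
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		panic(err)
	}
	defer store.Shutdown(false)

	// One consistent snapshot of all three object kinds; the values may
	// already be stale by the time they are returned.
	res, err := store.MultiList(storage.MultiListOptions{
		Images:     true,
		Layers:     true,
		Containers: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d images, %d layers, %d containers\n",
		len(res.Images), len(res.Layers), len(res.Containers))
}
```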
@@ -1443,8 +1445,8 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare var options LayerOptions if lOptions != nil { options = *lOptions - options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData) - options.Flags = copyStringInterfaceMap(lOptions.Flags) + options.BigData = slices.Clone(lOptions.BigData) + options.Flags = copyMapPreferringNil(lOptions.Flags) } if options.HostUIDMapping { options.UIDMap = nil @@ -1515,8 +1517,8 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare options.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: options.HostUIDMapping, HostGIDMapping: options.HostGIDMapping, - UIDMap: copyIDMap(uidMap), - GIDMap: copyIDMap(gidMap), + UIDMap: copySlicePreferringNil(uidMap), + GIDMap: copySlicePreferringNil(gidMap), } } return rlstore.create(id, parentLayer, names, mountLabel, nil, &options, writeable, diff, slo) @@ -1584,8 +1586,8 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i Metadata: i.Metadata, CreationDate: i.Created, Digest: i.Digest, - Digests: copyDigestSlice(i.Digests), - NamesHistory: copyStringSlice(i.NamesHistory), + Digests: copySlicePreferringNil(i.Digests), + NamesHistory: copySlicePreferringNil(i.NamesHistory), } for _, key := range i.BigDataNames { data, err := store.BigData(id, key) @@ -1602,7 +1604,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i Digest: dataDigest, }) } - namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...)) + namesToAddAfterCreating = dedupeStrings(slices.Concat(i.Names, names)) break } } @@ -1616,18 +1618,16 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i if iOptions.Digest != "" { options.Digest = iOptions.Digest } - options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...) + options.Digests = append(options.Digests, iOptions.Digests...) if iOptions.Metadata != "" { options.Metadata = iOptions.Metadata } options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...) - options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...) + options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...) 
if options.Flags == nil { options.Flags = make(map[string]interface{}) } - for k, v := range iOptions.Flags { - options.Flags[k] = v - } + maps.Copy(options.Flags, iOptions.Flags) } if options.CreationDate.IsZero() { @@ -1736,8 +1736,8 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst layerOptions.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: options.HostUIDMapping, HostGIDMapping: options.HostGIDMapping, - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), + UIDMap: copySlicePreferringNil(options.UIDMap), + GIDMap: copySlicePreferringNil(options.GIDMap), } } layerOptions.TemplateLayer = layer.ID @@ -1759,12 +1759,12 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat var options ContainerOptions if cOptions != nil { options = *cOptions - options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap) - options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap) - options.LabelOpts = copyStringSlice(cOptions.LabelOpts) - options.Flags = copyStringInterfaceMap(cOptions.Flags) - options.MountOpts = copyStringSlice(cOptions.MountOpts) - options.StorageOpt = copyStringStringMap(cOptions.StorageOpt) + options.IDMappingOptions.UIDMap = copySlicePreferringNil(cOptions.IDMappingOptions.UIDMap) + options.IDMappingOptions.GIDMap = copySlicePreferringNil(cOptions.IDMappingOptions.GIDMap) + options.LabelOpts = copySlicePreferringNil(cOptions.LabelOpts) + options.Flags = copyMapPreferringNil(cOptions.Flags) + options.MountOpts = copySlicePreferringNil(cOptions.MountOpts) + options.StorageOpt = copyMapPreferringNil(cOptions.StorageOpt) options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData) } if options.HostUIDMapping { @@ -1889,8 +1889,8 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat layerOptions.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: idMappingsOptions.HostUIDMapping, HostGIDMapping: idMappingsOptions.HostGIDMapping, - UIDMap: copyIDMap(uidMap), - GIDMap: copyIDMap(gidMap), + UIDMap: copySlicePreferringNil(uidMap), + GIDMap: copySlicePreferringNil(gidMap), } } if options.Flags == nil { @@ -1927,8 +1927,8 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: len(options.UIDMap) == 0, HostGIDMapping: len(options.GIDMap) == 0, - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), + UIDMap: copySlicePreferringNil(options.UIDMap), + GIDMap: copySlicePreferringNil(options.GIDMap), } container, err := s.containerStore.create(id, names, imageID, layer, &options) if err != nil || container == nil { @@ -2177,7 +2177,7 @@ func (s *store) ImageSize(id string) (int64, error) { } // The UncompressedSize is only valid if there's a digest to go with it. n := layer.UncompressedSize - if layer.UncompressedDigest == "" { + if layer.UncompressedDigest == "" || n == -1 { // Compute the size. 
n, err = layerStore.DiffSize("", layer.ID) if err != nil { @@ -2426,10 +2426,10 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e options := ImageOptions{ CreationDate: i.Created, Digest: i.Digest, - Digests: copyDigestSlice(i.Digests), + Digests: copySlicePreferringNil(i.Digests), Metadata: i.Metadata, - NamesHistory: copyStringSlice(i.NamesHistory), - Flags: copyStringInterfaceMap(i.Flags), + NamesHistory: copySlicePreferringNil(i.NamesHistory), + Flags: copyMapPreferringNil(i.Flags), } for _, key := range i.BigDataNames { data, err := store.BigData(id, key) @@ -2943,6 +2943,10 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) { if err != nil { return nil, err } + + // While the general rules require the layer store to only be locked RO (apart from known LOCKING BUGs) + // the overlay driver requires the primary layer store to be locked RW; see + // drivers/overlay.Driver.getMergedDir. if err := rlstore.startWriting(); err != nil { return nil, err } @@ -2999,6 +3003,9 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro return nil, err } + // While the general rules require the layer store to only be locked RO (apart from known LOCKING BUGs) + // the overlay driver requires the primary layer store to be locked RW; see + // drivers/overlay.Driver.getMergedDir. if err := rlstore.startWriting(); err != nil { return nil, err } @@ -3078,13 +3085,19 @@ func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) e return err } +func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + rlstore, err := s.getLayerStore() + if err != nil { + return nil, err + } + return rlstore.applyDiffWithDifferNoLock(options, differ) +} + func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { - return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { - if to != "" && !rlstore.Exists(to) { - return nil, ErrLayerUnknown - } - return rlstore.ApplyDiffWithDiffer(to, options, differ) - }) + if to != "" { + return nil, fmt.Errorf("ApplyDiffWithDiffer does not support non-empty 'layer' parameter") + } + return s.PrepareStagedLayer(options, differ) } func (s *store) DifferTarget(id string) (string, error) { @@ -3637,79 +3650,47 @@ func makeBigDataBaseName(key string) string { } func stringSliceWithoutValue(slice []string, value string) []string { - modified := make([]string, 0, len(slice)) - for _, v := range slice { - if v == value { - continue - } - modified = append(modified, v) - } - return modified + return slices.DeleteFunc(slices.Clone(slice), func(v string) bool { + return v == value + }) } -func copyStringSlice(slice []string) []string { - if len(slice) == 0 { +// copySlicePreferringNil returns a copy of the slice. +// If s is empty, a nil is returned. 
+func copySlicePreferringNil[S ~[]E, E any](s S) S { + if len(s) == 0 { return nil } - ret := make([]string, len(slice)) - copy(ret, slice) - return ret -} - -func copyStringInt64Map(m map[string]int64) map[string]int64 { - ret := make(map[string]int64, len(m)) - for k, v := range m { - ret[k] = v - } - return ret -} - -func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest { - ret := make(map[string]digest.Digest, len(m)) - for k, v := range m { - ret[k] = v - } - return ret -} - -func copyStringStringMap(m map[string]string) map[string]string { - ret := make(map[string]string, len(m)) - for k, v := range m { - ret[k] = v - } - return ret + return slices.Clone(s) } -func copyDigestSlice(slice []digest.Digest) []digest.Digest { - if len(slice) == 0 { +// copyMapPreferringNil returns a shallow clone of map m. +// If m is empty, a nil is returned. +// +// (As of, e.g., Go 1.23, maps.Clone preserves nil, but that’s not a documented promise; +// and this function turns even non-nil empty maps into nil.) +func copyMapPreferringNil[K comparable, V any](m map[K]V) map[K]V { + if len(m) == 0 { return nil } - ret := make([]digest.Digest, len(slice)) - copy(ret, slice) - return ret + return maps.Clone(m) } -// copyStringInterfaceMap still forces us to assume that the interface{} is -// a non-pointer scalar value -func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} { - ret := make(map[string]interface{}, len(m)) +// newMapFrom returns a shallow clone of map m. +// If m is empty, an empty map is allocated and returned. +func newMapFrom[K comparable, V any](m map[K]V) map[K]V { + ret := make(map[K]V, len(m)) for k, v := range m { ret[k] = v } return ret } -func copyLayerBigDataOptionSlice(slice []LayerBigDataOption) []LayerBigDataOption { - ret := make([]LayerBigDataOption, len(slice)) - copy(ret, slice) - return ret -} - func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption { ret := make([]ImageBigDataOption, len(slice)) for i := range slice { ret[i].Key = slice[i].Key - ret[i].Data = append([]byte{}, slice[i].Data...) + ret[i].Data = slices.Clone(slice[i].Data) ret[i].Digest = slice[i].Digest } return ret @@ -3719,7 +3700,7 @@ func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []Container ret := make([]ContainerBigDataOption, len(slice)) for i := range slice { ret[i].Key = slice[i].Key - ret[i].Data = append([]byte{}, slice[i].Data...) + ret[i].Data = slices.Clone(slice[i].Data) } return ret } @@ -3773,10 +3754,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro return nil, err } key = strings.ToLower(key) - for _, m := range mountOpts { - if m == key { - return strings.Split(val, ","), nil - } + if slices.Contains(mountOpts, key) { + return strings.Split(val, ","), nil } } return nil, nil @@ -3784,11 +3763,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro // Free removes the store from the list of stores func (s *store) Free() { - for i := 0; i < len(stores); i++ { - if stores[i] == s { - stores = append(stores[:i], stores[i+1:]...) - return - } + if i := slices.Index(stores, s); i != -1 { + stores = slices.Delete(stores, i, i+1) } } @@ -3815,3 +3791,55 @@ func (s *store) GarbageCollect() error { return firstErr } + +// List returns a MultiListResult structure that contains layer, image, or container +// extracts according to the values in MultiListOptions. 
+func (s *store) MultiList(options MultiListOptions) (MultiListResult, error) { + // TODO: Possible optimization: Deduplicate content from multiple stores. + out := MultiListResult{} + + if options.Layers { + layerStores, err := s.allLayerStores() + if err != nil { + return MultiListResult{}, err + } + for _, roStore := range layerStores { + if err := roStore.startReading(); err != nil { + return MultiListResult{}, err + } + defer roStore.stopReading() + layers, err := roStore.Layers() + if err != nil { + return MultiListResult{}, err + } + out.Layers = append(out.Layers, layers...) + } + } + + if options.Images { + for _, roStore := range s.allImageStores() { + if err := roStore.startReading(); err != nil { + return MultiListResult{}, err + } + defer roStore.stopReading() + + images, err := roStore.Images() + if err != nil { + return MultiListResult{}, err + } + out.Images = append(out.Images, images...) + } + } + + if options.Containers { + containers, _, err := readContainerStore(s, func() ([]Container, bool, error) { + res, err := s.containerStore.Containers() + return res, true, err + }) + if err != nil { + return MultiListResult{}, err + } + out.Containers = append(out.Containers, containers...) + } + return out, nil +} diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index f1a900b8d..efc08c476 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -344,8 +344,8 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) { dirEntries, err := os.ReadDir(opts.GraphRoot) if err == nil { for _, entry := range dirEntries { - if strings.HasSuffix(entry.Name(), "-images") { - opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images") + if name, ok := strings.CutSuffix(entry.Name(), "-images"); ok { + opts.GraphDriverName = name break } } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 5bade6ffe..c61d79837 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "slices" "github.com/containers/storage/types" ) @@ -41,22 +42,12 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO // remove given names from old names result = make([]string, 0, len(oldNames)) for _, name := range oldNames { - // only keep names in final result which do not intersect with input names - // basically `result = oldNames - opParameters` - nameShouldBeRemoved := false - for _, opName := range opParameters { - if name == opName { - nameShouldBeRemoved = true - } - } - if !nameShouldBeRemoved { + if !slices.Contains(opParameters, name) { result = append(result, name) } } case addNames: - result = make([]string, 0, len(opParameters)+len(oldNames)) - result = append(result, opParameters...) - result = append(result, oldNames...) + result = slices.Concat(opParameters, oldNames) default: return result, errInvalidUpdateNameOperation } diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index 7436896e1..04b5685ab 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -6,6 +6,43 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
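A recurring theme in these store.go and utils.go hunks is replacing the per-type copy helpers with two generics plus stdlib slices/maps calls. A self-contained sketch of the nil-preferring semantics, reimplementing the unexported helpers locally for illustration:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

// Local reimplementations of the unexported helpers, for illustration only.
func copySlicePreferringNil[S ~[]E, E any](s S) S {
	if len(s) == 0 {
		return nil // both nil and empty inputs come back as nil
	}
	return slices.Clone(s)
}

func copyMapPreferringNil[K comparable, V any](m map[K]V) map[K]V {
	if len(m) == 0 {
		return nil
	}
	return maps.Clone(m)
}

func main() {
	fmt.Println(copySlicePreferringNil([]string{}) == nil)     // true
	fmt.Println(copySlicePreferringNil([]string{"a"}))         // [a] (a fresh copy)
	fmt.Println(copyMapPreferringNil(map[string]int{}) == nil) // true

	// stringSliceWithoutValue is now a clone-and-filter one-liner:
	names := []string{"a", "b", "a"}
	fmt.Println(slices.DeleteFunc(slices.Clone(names), func(v string) bool { return v == "a" })) // [b]
}
```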
## [Unreleased] ## +## [0.3.4] - 2024-10-09 ## + +### Fixed ### +- Previously, some of our testing mocks resulted in us doing `import "testing"` + in non-`_test.go` code, which made some downstreams like Kubernetes unhappy. + This has been fixed. (#32) + +## [0.3.3] - 2024-09-30 ## + +### Fixed ### +- The mode and owner verification logic in `MkdirAll` has been removed. This + was originally intended to protect against some theoretical attacks but upon + further consideration these protections don't actually buy us anything and + they were causing spurious errors with more complicated filesystem setups. +- The "is the created directory empty" logic in `MkdirAll` has also been + removed. This was not causing us issues yet, but some pseudofilesystems (such + as `cgroup`) create non-empty directories and so this logic would've been + wrong for such cases. + +## [0.3.2] - 2024-09-13 ## + +### Changed ### +- Passing the `S_ISUID` or `S_ISGID` modes to `MkdirAllInRoot` will now return + an explicit error saying that those bits are ignored by `mkdirat(2)`. In the + past a different error was returned, but since the silent ignoring behaviour + is codified in the man pages a more explicit error seems apt. While silently + ignoring these bits would be the most compatible option, it could lead to + users thinking their code sets these bits when it doesn't. Programs that need + to deal with compatibility can mask the bits themselves. (#23, #25) + +### Fixed ### +- If a directory has `S_ISGID` set, then all child directories will have + `S_ISGID` set when created and a different gid will be used for any inode + created under the directory. Previously, the "expected owner and mode" + validation in `securejoin.MkdirAll` did not correctly handle this. We now + correctly handle this case. (#24, #25) + ## [0.3.1] - 2024-07-23 ## ### Changed ### @@ -127,7 +164,10 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). -[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...HEAD +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...HEAD +[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4 +[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.2...v0.3.3 +[0.3.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...v0.3.2 [0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1 [0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0 [0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5 diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 253956f86..eaeb53fcd 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -1,5 +1,6 @@ ## `filepath-securejoin` ## +[![Go Documentation](https://pkg.go.dev/badge/github.com/cyphar/filepath-securejoin.svg)](https://pkg.go.dev/github.com/cyphar/filepath-securejoin) [![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) ### Old API ### @@ -85,7 +86,7 @@ more secure. In particular: or avoid being tricked by a `/proc` that is not legitimate.
This is done using [`openat2`][openat2.2] for all users, and privileged users will also be further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2] - (Linux 4.18 or later). + (Linux 5.2 or later). [openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html [fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 9e11b32fc..42045acae 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.3.1 +0.3.4 diff --git a/vendor/github.com/cyphar/filepath-securejoin/doc.go b/vendor/github.com/cyphar/filepath-securejoin/doc.go new file mode 100644 index 000000000..1ec7d065e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/doc.go @@ -0,0 +1,39 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin implements a set of helpers to make it easier to write Go +// code that is safe against symlink-related escape attacks. The primary idea +// is to let you resolve a path within a rootfs directory as if the rootfs was +// a chroot. +// +// securejoin has two APIs, a "legacy" API and a "modern" API. +// +// The legacy API is [SecureJoin] and [SecureJoinVFS]. These methods are +// **not** safe against race conditions where an attacker changes the +// filesystem after (or during) the [SecureJoin] operation. +// +// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived +// functions). These are safe against racing attackers and have several other +// protections that are not provided by the legacy API. There are many more +// operations that most programs expect to be able to do safely, but we do not +// provide explicit support for them because we want to encourage users to +// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a +// cross-language next-generation library that is entirely designed around +// operating on paths safely. +// +// securejoin has been used by several container runtimes (Docker, runc, +// Kubernetes, etc) for quite a few years as a de-facto standard for operating +// on container filesystem paths "safely". However, most users still use the +// legacy API which is unsafe against various attacks (there is a fairly long +// history of CVEs in dependent projects as a result). Users should switch to the modern +// API as soon as possible (or even better, switch to libpathrs). +// +// This project was initially intended to be included in the Go standard +// library, but [it was rejected](https://go.dev/issue/20126). There is now a +// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API +// that shares some of the goals of filepath-securejoin. However, that design +// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the +// use case of container runtimes and most system tools.
+package securejoin diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index bd86a48b0..e0ee3f2b5 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -3,11 +3,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package securejoin is an implementation of the hopefully-soon-to-be-included -// SecureJoin helper that is meant to be part of the "path/filepath" package. -// The purpose of this project is to provide a PoC implementation to make the -// SecureJoin proposal (https://github.com/golang/go/issues/20126) more -// tangible. package securejoin import ( @@ -22,27 +17,27 @@ const maxSymlinkLimit = 255 // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is -// effectively a more broad version of os.IsNotExist. +// effectively a more broad version of [os.IsNotExist]. func IsNotExist(err error) bool { // Check that it's not actually an ENOTDIR, which in some cases is a more // convoluted case of ENOENT (usually involving weird paths). return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) } -// SecureJoinVFS joins the two given path components (similar to Join) except +// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except // that the returned path is guaranteed to be scoped inside the provided root // path (when evaluated). Any symbolic links in the path are evaluated with the // given root treated as the root of the filesystem, similar to a chroot. The -// filesystem state is evaluated through the given VFS interface (if nil, the -// standard os.* family of functions are used). +// filesystem state is evaluated through the given [VFS] interface (if nil, the +// standard [os].* family of functions are used). // // Note that the guarantees provided by this function only apply if the path // components in the returned string are not modified (in other words are not // replaced with symlinks on the filesystem) after this function has returned. -// Such a symlink race is necessarily out-of-scope of SecureJoin. +// Such a symlink race is necessarily out-of-scope of SecureJoinVFS. // // NOTE: Due to the above limitation, Linux users are strongly encouraged to -// use OpenInRoot instead, which does safely protect against these kinds of +// use [OpenInRoot] instead, which does safely protect against these kinds of // attacks. There is no way to solve this problem with SecureJoinVFS because // the API is fundamentally wrong (you cannot return a "safe" path string and // guarantee it won't be modified afterwards). @@ -123,8 +118,8 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { return filepath.Join(root, finalPath), nil } -// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library -// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +// SecureJoin is a wrapper around [SecureJoinVFS] that just uses the [os].* library +// of functions as the [VFS]. If in doubt, use this function over [SecureJoinVFS]. 
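For orientation, a minimal sketch of the legacy API documented above; the rootfs path is hypothetical:

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Resolve an untrusted path as if /srv/rootfs were a chroot; "../"
	// components and symlinks inside the rootfs cannot escape it.
	path, err := securejoin.SecureJoin("/srv/rootfs", "/etc/../../../etc/passwd")
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // /srv/rootfs/etc/passwd
}
```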
func SecureJoin(root, unsafePath string) (string, error) { return SecureJoinVFS(root, unsafePath, nil) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go index ad2bd7973..b5f674524 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go @@ -9,7 +9,6 @@ package securejoin import ( "errors" "fmt" - "io" "os" "path/filepath" "slices" @@ -23,29 +22,36 @@ var ( errPossibleAttack = errors.New("possible attack detected") ) -// MkdirAllHandle is equivalent to MkdirAll, except that it is safer to use in -// two respects: +// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use +// in two respects: // -// - The caller provides the root directory as an *os.File (preferably O_PATH) +// - The caller provides the root directory as an *[os.File] (preferably O_PATH) // handle. This means that the caller can be sure which root directory is // being used. Note that this can be emulated by using /proc/self/fd/... as -// the root path with MkdirAll. +// the root path with [os.MkdirAll]. // -// - Once all of the directories have been created, an *os.File (O_PATH) handle +// - Once all of the directories have been created, an *[os.File] O_PATH handle // to the directory at unsafePath is returned to the caller. This is done in // an effectively-race-free way (an attacker would only be able to swap the // final directory component), which is not possible to emulate with -// MkdirAll. +// [MkdirAll]. // // In addition, the returned handle is obtained far more efficiently than doing -// a brand new lookup of unsafePath (such as with SecureJoin or openat2) after -// doing MkdirAll. If you intend to open the directory after creating it, you +// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after +// doing [MkdirAll]. If you intend to open the directory after creating it, you // should use MkdirAllHandle. func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) { // Make sure there are no os.FileMode bits set. if mode&^0o7777 != 0 { return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode) } + // On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid + // bits. We could also silently ignore them but since we have very few + // users it seems more prudent to return an error so users notice that + // these bits will not be set. + if mode&^0o1777 != 0 { + return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) + } // Try to open as much of the path as possible. currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) @@ -101,24 +107,6 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err // Make sure the mode doesn't have any type bits. mode &^= unix.S_IFMT - // What properties do we expect any newly created directories to have? - var ( - // While umask(2) is a per-thread property, and thus this value could - // vary between threads, a functioning Go program would LockOSThread - // threads with different umasks and so we don't need to LockOSThread - // for this entire mkdirat loop (if we are in the locked thread with a - // different umask, we are already locked and there's nothing for us to - // do -- and if not then it doesn't matter which thread we run on and - // there's nothing for us to do). 
- expectedMode = uint32(unix.S_IFDIR | (mode &^ getUmask())) - - // We would want to get the fs[ug]id here, but we can't access those - // from userspace. In practice, nobody uses setfs[ug]id() anymore, so - // just use the effective [ug]id (which is equivalent to the fs[ug]id - // for programs that don't use setfs[ug]id). - expectedUid = uint32(unix.Geteuid()) - expectedGid = uint32(unix.Getegid()) - ) // Create the remaining components. for _, part := range remainingParts { @@ -129,7 +117,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err } // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely - // create the finaly component without worrying about symlink-exchange + // create the final component without worrying about symlink-exchange // attacks. if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil { err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} @@ -157,40 +145,30 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err _ = currentDir.Close() currentDir = nextDir - // Make sure that the directory matches what we expect. An attacker - // could have swapped the directory between us making it and opening - // it. There's no way for us to be sure that the directory is - // _precisely_ the same as the directory we created, but if we are in - // an empty directory with the same owner and mode as the one we - // created then there is nothing the attacker could do with this new - // directory that they couldn't do with the old one. - if stat, err := fstat(currentDir); err != nil { - return nil, fmt.Errorf("check newly created directory: %w", err) - } else { - if stat.Mode != expectedMode { - return nil, fmt.Errorf("%w: newly created directory %q has incorrect mode 0o%.3o (expected 0o%.3o)", errPossibleAttack, currentDir.Name(), stat.Mode, expectedMode) - } - if stat.Uid != expectedUid || stat.Gid != expectedGid { - return nil, fmt.Errorf("%w: newly created directory %q has incorrect owner %d:%d (expected %d:%d)", errPossibleAttack, currentDir.Name(), stat.Uid, stat.Gid, expectedUid, expectedGid) - } - // Check that the directory is empty. We only need to check for - // a single entry, and we should get EOF if the directory is - // empty. - _, err := currentDir.Readdirnames(1) - if !errors.Is(err, io.EOF) { - if err == nil { - err = fmt.Errorf("%w: newly created directory %q is non-empty", errPossibleAttack, currentDir.Name()) - } - return nil, fmt.Errorf("check if newly created directory %q is empty: %w", currentDir.Name(), err) - } - // Reset the offset. - _, _ = currentDir.Seek(0, unix.SEEK_SET) - } + // It's possible that the directory we just opened was swapped by an + // attacker. Unfortunately there isn't much we can do to protect + // against this, and MkdirAll's behaviour is that we will reuse + // existing directories anyway so the need to protect against this is + // incredibly limited (and arguably doesn't even deserve mention here). + // + // Ideally we might want to check that the owner and mode match what we + // would've created -- unfortunately, it is non-trivial to verify that + // the owner and mode of the created directory match. While plain Unix + // DAC rules seem simple enough to emulate, there are a bunch of other + // factors that can change the mode or owner of created directories + // (default POSIX ACLs, mount options like uid=1,gid=2,umask=0 on + // filesystems like vfat, etc etc). 
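A sketch of MkdirAllHandle under the new, verification-free semantics; the rootfs path is hypothetical and the API is Linux-only:

```go
package main

import (
	"fmt"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	root, err := os.OpenFile("/srv/rootfs", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// Creates the missing components and hands back an O_PATH handle to
	// the final directory, without a separate racy lookup.
	dir, err := securejoin.MkdirAllHandle(root, "var/log/app", 0o755)
	if err != nil {
		panic(err)
	}
	defer dir.Close()
	fmt.Println("created:", dir.Name())
}
```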
We used to try to verify this but + // it just led to a series of spurious errors. + // + // We could also check that the directory is non-empty, but + // unfortunately some pseudofilesystems (like cgroupfs) create + // non-empty directories, which would result in different spurious + // errors. } return currentDir, nil } -// MkdirAll is a race-safe alternative to the Go stdlib's os.MkdirAll function, +// MkdirAll is a race-safe alternative to the [os.MkdirAll] function, // where the new directory is guaranteed to be within the root directory (if an // attacker can move directories from inside the root to outside the root, the // created directory tree might be outside of the root but the key constraint @@ -203,16 +181,16 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err // err := os.MkdirAll(path, mode) // // But is much safer. The above implementation is unsafe because if an attacker -// can modify the filesystem tree between SecureJoin and MkdirAll, it is +// can modify the filesystem tree between [SecureJoin] and [os.MkdirAll], it is // possible for MkdirAll to resolve unsafe symlink components and create // directories outside of the root. // // If you plan to open the directory after you have created it or want to use -// an open directory handle as the root, you should use MkdirAllHandle instead. -// This function is a wrapper around MkdirAllHandle. +// an open directory handle as the root, you should use [MkdirAllHandle] instead. +// This function is a wrapper around [MkdirAllHandle]. // // NOTE: The mode argument must be set the unix mode bits (unix.S_I...), not -// the Go generic mode bits (os.Mode...). +// the Go generic mode bits ([os.FileMode]...). func MkdirAll(root, unsafePath string, mode int) error { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go index 52dce76f3..230be73f0 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" ) -// OpenatInRoot is equivalent to OpenInRoot, except that the root is provided -// using an *os.File handle, to ensure that the correct root directory is used. +// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided +// using an *[os.File] handle, to ensure that the correct root directory is used. func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { handle, err := completeLookupInRoot(root, unsafePath) if err != nil { @@ -31,7 +31,7 @@ func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { // handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) // // But is much safer. The above implementation is unsafe because if an attacker -// can modify the filesystem tree between SecureJoin and OpenFile, it is +// can modify the filesystem tree between [SecureJoin] and [os.OpenFile], it is // possible for the returned file to be outside of the root. // // Note that the returned handle is an O_PATH handle, meaning that only a very @@ -39,7 +39,7 @@ func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { // accidentally opening an untrusted file that could cause issues (such as a // disconnected TTY that could cause a DoS, or some other issue).
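A companion sketch for the path-based MkdirAll wrapper in the hunk above: the mode argument takes Unix permission bits, and the new guard rejects suid/sgid outright (hypothetical path):

```go
package main

import (
	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// 0o755 is accepted; passing 0o4755 (suid) would now fail with an
	// explicit "suid and sgid are ignored by mkdir" error.
	if err := securejoin.MkdirAll("/srv/rootfs", "var/lib/app/cache", 0o755); err != nil {
		panic(err)
	}
}
```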
In order to // use the returned handle, you can "upgrade" it to a proper handle using -// Reopen. +// [Reopen]. func OpenInRoot(root, unsafePath string) (*os.File, error) { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { @@ -49,7 +49,7 @@ func OpenInRoot(root, unsafePath string) (*os.File, error) { return OpenatInRoot(rootDir, unsafePath) } -// Reopen takes an *os.File handle and re-opens it through /proc/self/fd. +// Reopen takes an *[os.File] handle and re-opens it through /proc/self/fd. // Reopen(file, flags) is effectively equivalent to // // fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) @@ -59,7 +59,9 @@ func OpenInRoot(root, unsafePath string) (*os.File, error) { // maliciously-configured /proc mount. While this attack scenario is not // common, in container runtimes it is possible for higher-level runtimes to be // tricked into configuring an unsafe /proc that can be used to attack file -// operations. See CVE-2019-19921 for more details. +// operations. See [CVE-2019-19921] for more details. +// +// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw func Reopen(handle *os.File, flags int) (*os.File, error) { procRoot, err := getProcRoot() if err != nil { diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go index 921b3e1d4..ae3b381ef 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go @@ -13,34 +13,21 @@ import ( "path/filepath" "strings" "sync" - "testing" "golang.org/x/sys/unix" ) -var ( - hasOpenat2Bool bool - hasOpenat2Once sync.Once - - testingForceHasOpenat2 *bool -) - -func hasOpenat2() bool { - if testing.Testing() && testingForceHasOpenat2 != nil { - return *testingForceHasOpenat2 - } - hasOpenat2Once.Do(func() { - fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, - }) - if err == nil { - hasOpenat2Bool = true - _ = unix.Close(fd) - } +var hasOpenat2 = sync.OnceValue(func() bool { + fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, }) - return hasOpenat2Bool -} + if err != nil { + return false + } + _ = unix.Close(fd) + return true +}) func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go index adf0bd08f..8cc827d70 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go @@ -54,33 +54,26 @@ func verifyProcRoot(procRoot *os.File) error { return nil } -var ( - hasNewMountApiBool bool - hasNewMountApiOnce sync.Once -) - -func hasNewMountApi() bool { - hasNewMountApiOnce.Do(func() { - // All of the pieces of the new mount API we use (fsopen, fsconfig, - // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can - // just check for one of the syscalls and the others should also be - // available. - // - // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. 
- // This is equivalent to openat(2), but tells us if open_tree is - // available (and thus all of the other basic new mount API syscalls). - // open_tree(2) is most light-weight syscall to test here. - // - // [1]: merge commit 400913252d09 - // [2]: - fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) - if err == nil { - hasNewMountApiBool = true - _ = unix.Close(fd) - } - }) - return hasNewMountApiBool -} +var hasNewMountApi = sync.OnceValue(func() bool { + // All of the pieces of the new mount API we use (fsopen, fsconfig, + // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can + // just check for one of the syscalls and the others should also be + // available. + // + // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. + // This is equivalent to openat(2), but tells us if open_tree is + // available (and thus all of the other basic new mount API syscalls). + // open_tree(2) is most light-weight syscall to test here. + // + // [1]: merge commit 400913252d09 + // [2]: + fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) + if err != nil { + return false + } + _ = unix.Close(fd) + return true +}) func fsopen(fsName string, flags int) (*os.File, error) { // Make sure we always set O_CLOEXEC. @@ -141,7 +134,7 @@ func clonePrivateProcMount() (_ *os.File, Err error) { // we can be sure there are no over-mounts and so if the root is valid then // we're golden. Otherwise, we have to deal with over-mounts. procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) - if err != nil || testingForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { + if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) } if err != nil { @@ -159,27 +152,19 @@ func clonePrivateProcMount() (_ *os.File, Err error) { } func privateProcRoot() (*os.File, error) { - if !hasNewMountApi() || testingForceGetProcRootUnsafe() { + if !hasNewMountApi() || hookForceGetProcRootUnsafe() { return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) } // Try to create a new procfs mount from scratch if we can. This ensures we // can get a procfs mount even if /proc is fake (for whatever reason). procRoot, err := newPrivateProcMount() - if err != nil || testingForcePrivateProcRootOpenTree(procRoot) { + if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { // Try to clone /proc then... 
procRoot, err = clonePrivateProcMount() } return procRoot, err } -var ( - procRootHandle *os.File - procRootError error - procRootOnce sync.Once - - errUnsafeProcfs = errors.New("unsafe procfs detected") -) - func unsafeHostProcRoot() (_ *os.File, Err error) { procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { @@ -207,17 +192,15 @@ func doGetProcRoot() (*os.File, error) { return procRoot, err } -func getProcRoot() (*os.File, error) { - procRootOnce.Do(func() { - procRootHandle, procRootError = doGetProcRoot() - }) - return procRootHandle, procRootError -} +var getProcRoot = sync.OnceValues(func() (*os.File, error) { + return doGetProcRoot() +}) -var ( - haveProcThreadSelf bool - haveProcThreadSelfOnce sync.Once -) +var hasProcThreadSelf = sync.OnceValue(func() bool { + return unix.Access("/proc/thread-self/", unix.F_OK) == nil +}) + +var errUnsafeProcfs = errors.New("unsafe procfs detected") type procThreadSelfCloser func() @@ -230,13 +213,6 @@ type procThreadSelfCloser func() // This is similar to ProcThreadSelf from runc, but with extra hardening // applied and using *os.File. func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) { - haveProcThreadSelfOnce.Do(func() { - // If the kernel doesn't support thread-self, it doesn't matter which - // /proc handle we use. - _, err := fstatatFile(procRoot, "thread-self", unix.AT_SYMLINK_NOFOLLOW) - haveProcThreadSelf = (err == nil) - }) - // We need to lock our thread until the caller is done with the handle // because between getting the handle and using it we could get interrupted // by the Go runtime and hit the case where the underlying thread is @@ -251,10 +227,10 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread // Figure out what prefix we want to use. threadSelf := "thread-self/" - if !haveProcThreadSelf || testingForceProcSelfTask() { + if !hasProcThreadSelf() || hookForceProcSelfTask() { /// Pre-3.17 kernels don't have /proc/thread-self, so do it manually. threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/" - if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || testingForceProcSelf() { + if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() { // In this case, we running in a pid namespace that doesn't match // the /proc mount we have. This can happen inside runc. // @@ -275,7 +251,7 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread // absolutely sure we are operating on a clean /proc handle that // doesn't have any cheeky overmounts that could trick us (including // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't - // stricly needed, but just use it since we have it. + // strictly needed, but just use it since we have it. // // NOTE: /proc/self is technically a magic-link (the contents of the // symlink are generated dynamically), but it doesn't use @@ -313,24 +289,16 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread return handle, runtime.UnlockOSThread, nil } -var ( - hasStatxMountIdBool bool - hasStatxMountIdOnce sync.Once -) - -func hasStatxMountId() bool { - hasStatxMountIdOnce.Do(func() { - var ( - stx unix.Statx_t - // We don't care which mount ID we get. The kernel will give us the - // unique one if it is supported. 
- wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID - ) - err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) - hasStatxMountIdBool = (err == nil && (stx.Mask&wantStxMask != 0)) - }) - return hasStatxMountIdBool -} +var hasStatxMountId = sync.OnceValue(func() bool { + var ( + stx unix.Statx_t + // We don't care which mount ID we get. The kernel will give us the + // unique one if it is supported. + wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID + ) + err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) + return err == nil && stx.Mask&wantStxMask != 0 +}) func getMountId(dir *os.File, path string) (uint64, error) { // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. @@ -443,22 +411,6 @@ func isDeadInode(file *os.File) error { return nil } -func getUmask() int { - // umask is a per-thread property, but it is inherited by children, so we - // need to lock our OS thread to make sure that no other goroutine runs in - // this thread and no goroutines are spawned from this thread until we - // revert to the old umask. - // - // We could parse /proc/self/status to avoid this get-set problem, but - // /proc/thread-self requires LockOSThread anyway, so there's no real - // benefit over just using umask(2). - runtime.LockOSThread() - umask := unix.Umask(0) - unix.Umask(umask) - runtime.UnlockOSThread() - return umask -} - func checkProcSelfFdPath(path string, file *os.File) error { if err := isDeadInode(file); err != nil { return err @@ -472,3 +424,17 @@ func checkProcSelfFdPath(path string, file *os.File) error { } return nil } + +// Test hooks used in the procfs tests to verify that the fallback logic works. +// See testing_mocks_linux_test.go and procfs_linux_test.go for more details. +var ( + hookForcePrivateProcRootOpenTree = hookDummyFile + hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile + hookForceGetProcRootUnsafe = hookDummy + + hookForceProcSelfTask = hookDummy + hookForceProcSelf = hookDummy +) + +func hookDummy() bool { return false } +func hookDummyFile(_ *os.File) bool { return false } diff --git a/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go deleted file mode 100644 index a3aedf03d..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
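All the feature probes in this file now share one shape: sync.OnceValue (Go 1.21+) replaces a package-level bool plus sync.Once. The openat2 probe from this diff, shown standalone (Linux-only):

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sys/unix"
)

// Probe once, cache forever; no separate hasXBool/hasXOnce variables.
var hasOpenat2 = sync.OnceValue(func() bool {
	fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_CLOEXEC,
		Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
	})
	if err != nil {
		return false
	}
	_ = unix.Close(fd)
	return true
})

func main() {
	fmt.Println("openat2 supported:", hasOpenat2())
}
```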
- -package securejoin - -import ( - "os" - "testing" -) - -type forceGetProcRootLevel int - -const ( - forceGetProcRootDefault forceGetProcRootLevel = iota - forceGetProcRootOpenTree // force open_tree() - forceGetProcRootOpenTreeAtRecursive // force open_tree(AT_RECURSIVE) - forceGetProcRootUnsafe // force open() -) - -var testingForceGetProcRoot *forceGetProcRootLevel - -func testingCheckClose(check bool, f *os.File) bool { - if check { - if f != nil { - _ = f.Close() - } - return true - } - return false -} - -func testingForcePrivateProcRootOpenTree(f *os.File) bool { - return testing.Testing() && testingForceGetProcRoot != nil && - testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTree, f) -} - -func testingForcePrivateProcRootOpenTreeAtRecursive(f *os.File) bool { - return testing.Testing() && testingForceGetProcRoot != nil && - testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTreeAtRecursive, f) -} - -func testingForceGetProcRootUnsafe() bool { - return testing.Testing() && testingForceGetProcRoot != nil && - *testingForceGetProcRoot >= forceGetProcRootUnsafe -} - -type forceProcThreadSelfLevel int - -const ( - forceProcThreadSelfDefault forceProcThreadSelfLevel = iota - forceProcSelfTask - forceProcSelf -) - -var testingForceProcThreadSelf *forceProcThreadSelfLevel - -func testingForceProcSelfTask() bool { - return testing.Testing() && testingForceProcThreadSelf != nil && - *testingForceProcThreadSelf >= forceProcSelfTask -} - -func testingForceProcSelf() bool { - return testing.Testing() && testingForceProcThreadSelf != nil && - *testingForceProcThreadSelf >= forceProcSelf -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go index 6e27c7dd8..36373f8c5 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -10,19 +10,19 @@ import "os" // are several projects (umoci and go-mtree) that are using this sort of // interface. -// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is -// equivalent to using the standard os.* family of functions. This is mainly +// VFS is the minimal interface necessary to use [SecureJoinVFS]. A nil VFS is +// equivalent to using the standard [os].* family of functions. This is mainly // used for the purposes of mock testing, but also can be used to otherwise use -// SecureJoin with VFS-like system. +// [SecureJoinVFS] with VFS-like system. type VFS interface { - // Lstat returns a FileInfo describing the named file. If the file is a - // symbolic link, the returned FileInfo describes the symbolic link. Lstat - // makes no attempt to follow the link. These semantics are identical to - // os.Lstat. + // Lstat returns an [os.FileInfo] describing the named file. If the + // file is a symbolic link, the returned [os.FileInfo] describes the + // symbolic link. Lstat makes no attempt to follow the link. + // The semantics are identical to [os.Lstat]. Lstat(name string) (os.FileInfo, error) - // Readlink returns the destination of the named symbolic link. These - // semantics are identical to os.Readlink. + // Readlink returns the destination of the named symbolic link. + // The semantics are identical to [os.Readlink]. Readlink(name string) (string, error) } @@ -30,12 +30,6 @@ type VFS interface { // module. type osVFS struct{} -// Lstat returns a FileInfo describing the named file. If the file is a -// symbolic link, the returned FileInfo describes the symbolic link. 
Lstat -// makes no attempt to follow the link. These semantics are identical to -// os.Lstat. func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } -// Readlink returns the destination of the named symbolic link. These -// semantics are identical to os.Readlink. func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index f831735f8..93d64cd8d 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.46" + DefaultVersion = "1.47" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". Note that the daemon diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 78f0ce1f2..7164e1eba 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.46" +basePath: "/v1.47" info: title: "Docker Engine API" - version: "1.46" + version: "1.47" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.46) is used. - For example, calling `/info` is the same as calling `/v1.46/info`. Using the + If you omit the version-prefix, the current version of the API (v1.47) is used. + For example, calling `/info` is the same as calling `/v1.47/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -393,7 +393,7 @@ definitions: Make the mount non-recursively read-only, but still leave the mount recursive (unless NonRecursive is set to `true` in conjunction). - Addded in v1.44, before that version all read-only mounts were + Added in v1.44, before that version all read-only mounts were non-recursive by default. To match the previous behaviour this will default to `true` for clients on versions prior to v1.44. type: "boolean" @@ -1384,7 +1384,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" example: "" Domainname: @@ -1394,7 +1394,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" example: "" User: @@ -1408,7 +1408,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1419,7 +1419,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1430,7 +1430,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1457,7 +1457,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1468,7 +1468,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1479,7 +1479,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1516,7 +1516,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" default: "" example: "" @@ -1555,7 +1555,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1567,7 +1567,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "string" default: "" example: "" @@ -1601,7 +1601,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "integer" default: 10 x-nullable: true @@ -2216,7 +2216,7 @@ definitions: Created: description: | Date and time at which the image was created as a Unix timestamp - (number of seconds sinds EPOCH). + (number of seconds since EPOCH). type: "integer" x-nullable: false example: "1644009612" @@ -2265,6 +2265,19 @@ definitions: x-nullable: false type: "integer" example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" AuthConfig: type: "object" @@ -2500,7 +2513,7 @@ definitions: example: false Attachable: description: | - Wheter a global / swarm scope network is manually attachable by regular + Whether a global / swarm scope network is manually attachable by regular containers from workers in swarm mode. type: "boolean" default: false @@ -3723,7 +3736,7 @@ definitions: example: "json-file" Options: description: | - Driver-specific options for the selectd log driver, specified + Driver-specific options for the selected log driver, specified as key/value pairs. type: "object" additionalProperties: @@ -5318,7 +5331,7 @@ definitions: description: | The default (and highest) API version that is supported by the daemon type: "string" - example: "1.46" + example: "1.47" MinAPIVersion: description: | The minimum API version that is supported by the daemon @@ -5334,7 +5347,7 @@ definitions: The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" - example: "go1.21.12" + example: "go1.22.7" Os: description: | The operating system that the daemon is running on ("linux" or "windows") @@ -5830,13 +5843,13 @@ definitions: - "/var/run/cdi" Containerd: $ref: "#/definitions/ContainerdInfo" - x-nullable: true ContainerdInfo: description: | Information for connecting to the containerd instance that is used by the daemon. This is included for debugging purposes only. type: "object" + x-nullable: true properties: Address: description: "The address of the containerd socket." @@ -6644,6 +6657,120 @@ definitions: additionalProperties: type: "string" + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. 
+ type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. 
+ type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + paths: /containers/json: get: @@ -7585,7 +7712,7 @@ paths: * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` - * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] @@ -8622,6 +8749,11 @@ paths: description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false tags: ["Image"] /build: post: @@ -9094,12 +9226,23 @@ paths: parameters: - name: "name" in: "path" - description: "Image name or ID." + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. type: "string" required: true - name: "tag" in: "query" - description: "The tag to associate with the image on the registry." + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. type: "string" - name: "X-Registry-Auth" in: "header" diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index 727da8839..03648fb7b 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -1,6 +1,7 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "errors" "fmt" "strings" @@ -325,12 +326,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error { if policy.MaximumRetryCount < 0 { msg += " and cannot be negative" } - return &errInvalidParameter{fmt.Errorf(msg)} + return &errInvalidParameter{errors.New(msg)} } return nil case RestartPolicyOnFailure: if policy.MaximumRetryCount < 0 { - return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} + return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")} } return nil case "": diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 0c39ab5f1..0914b2a44 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -196,7 +196,7 @@ func (args Args) Match(field, source string) bool { } // GetBoolOrDefault returns a boolean value of the key if the key is present -// and is intepretable as a boolean value. Otherwise the default value is returned. +// and is interpretable as a boolean value. Otherwise the default value is returned. 
// Error is not nil only if the filter values are not valid boolean or are conflicting. func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { fieldValues, ok := args.fields[key] diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/docker/docker/api/types/image/manifest.go new file mode 100644 index 000000000..db8a00830 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/manifest.go @@ -0,0 +1,99 @@ +package image + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ManifestKind string + +const ( + ManifestKindImage ManifestKind = "image" + ManifestKindAttestation ManifestKind = "attestation" + ManifestKindUnknown ManifestKind = "unknown" +) + +type ManifestSummary struct { + // ID is the content-addressable ID of an image and is the same as the + // digest of the image manifest. + // + // Required: true + ID string `json:"ID"` + + // Descriptor is the OCI descriptor of the image. + // + // Required: true + Descriptor ocispec.Descriptor `json:"Descriptor"` + + // Indicates whether all the child content (image config, layers) is + // fully available locally + // + // Required: true + Available bool `json:"Available"` + + // Size is the size information of the content related to this manifest. + // Note: These sizes only take the locally available content into account. + // + // Required: true + Size struct { + // Content is the size (in bytes) of all the locally present + // content in the content store (e.g. image config, layers) + // referenced by this manifest and its children. + // This only includes blobs in the content store. + Content int64 `json:"Content"` + + // Total is the total size (in bytes) of all the locally present + // data (both distributable and non-distributable) that's related to + // this manifest and its children. + // This equal to the sum of [Content] size AND all the sizes in the + // [Size] struct present in the Kind-specific data struct. + // For example, for an image kind (Kind == ManifestKindImage), + // this would include the size of the image content and unpacked + // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Total int64 `json:"Total"` + } `json:"Size"` + + // Kind is the kind of the image manifest. + // + // Required: true + Kind ManifestKind `json:"Kind"` + + // Fields below are specific to the kind of the image manifest. + + // Present only if Kind == ManifestKindImage. + ImageData *ImageProperties `json:"ImageData,omitempty"` + + // Present only if Kind == ManifestKindAttestation. + AttestationData *AttestationProperties `json:"AttestationData,omitempty"` +} + +type ImageProperties struct { + // Platform is the OCI platform object describing the platform of the image. + // + // Required: true + Platform ocispec.Platform `json:"Platform"` + + Size struct { + // Unpacked is the size (in bytes) of the locally unpacked + // (uncompressed) image content that's directly usable by the containers + // running this image. + // It's independent of the distributable content - e.g. + // the image might still have an unpacked data that's still used by + // some container even when the distributable/compressed content is + // already gone. + // + // Required: true + Unpacked int64 `json:"Unpacked"` + } + + // Containers is an array containing the IDs of the containers that are + // using this image. 
+ // + // Required: true + Containers []string `json:"Containers"` +} + +type AttestationProperties struct { + // For is the digest of the image manifest that this attestation is for. + For digest.Digest `json:"For"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 8e32c9af8..923ebe5a0 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -76,6 +76,9 @@ type ListOptions struct { // ContainerCount indicates whether container count should be computed. ContainerCount bool + + // Manifests indicates whether the image manifests should be returned. + Manifests bool } // RemoveOptions holds parameters to remove images. diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go index f1e3e2ef0..e87e216a2 100644 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,10 +1,5 @@ package image -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Summary summary -// swagger:model Summary type Summary struct { // Number of containers using this image. Includes both stopped and running @@ -17,7 +12,7 @@ type Summary struct { Containers int64 `json:"Containers"` // Date and time at which the image was created as a Unix timestamp - // (number of seconds sinds EPOCH). + // (number of seconds since EPOCH). // // Required: true Created int64 `json:"Created"` @@ -47,6 +42,14 @@ type Summary struct { // Required: true ParentID string `json:"ParentId"` + // Manifests is a list of image manifests available in this image. It + // provides a more detailed view of the platform-specific image manifests or + // other image-attached data like build attestations. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Manifests []ManifestSummary `json:"Manifests,omitempty"` + // List of content-addressable digests of locally available image manifests // that the image is referenced from. Multiple manifests can refer to the // same image. diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go index 97a924e37..8e383f6e6 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -34,10 +34,9 @@ type AuthConfig struct { } // EncodeAuthConfig serializes the auth configuration as a base64url encoded -// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header. +// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. 
// -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 func EncodeAuthConfig(authConfig AuthConfig) (string, error) { buf, err := json.Marshal(authConfig) if err != nil { @@ -46,15 +45,14 @@ func EncodeAuthConfig(authConfig AuthConfig) (string, error) { return base64.URLEncoding.EncodeToString(buf), nil } -// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON +// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON // authentication information as sent through the X-Registry-Auth header. // -// This function always returns an AuthConfig, even if an error occurs. It is up +// This function always returns an [AuthConfig], even if an error occurs. It is up // to the caller to decide if authentication is required, and if the error can // be ignored. // -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { if authEncoded == "" { return &AuthConfig{}, nil @@ -69,7 +67,7 @@ func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { // clients and API versions. Current clients and API versions expect authentication // to be provided through the X-Registry-Auth header. // -// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an +// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an // error occurs. It is up to the caller to decide if authentication is required, // and if the error can be ignored. func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 3eae4b9b2..1b4be6fff 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -122,7 +122,7 @@ type CAConfig struct { SigningCAKey string `json:",omitempty"` // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. + // then the swarm is forced to generate a new root certificate and key. ForceRotate uint64 `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go index bbd9ff0b8..618a48162 100644 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -414,7 +414,7 @@ type Info struct { // the Volume has not been successfully created yet. VolumeID string `json:",omitempty"` - // AccessibleTopolgoy is the topology this volume is actually accessible + // AccessibleTopology is the topology this volume is actually accessible // from. AccessibleTopology []Topology `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index a9cc1e21e..bef679431 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -11,6 +11,11 @@ import ( ) // ImageList returns a list of images in the docker host. 
+// +// Experimental: Setting the [options.Manifest] will populate +// [image.Summary.Manifests] with information about image manifests. +// This is experimental and might change in the future without any backward +// compatibility. func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { var images []image.Summary @@ -47,6 +52,9 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([] if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { query.Set("shared-size", "1") } + if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, "1.47") { + query.Set("manifests", "1") + } serverResp, err := cli.get(ctx, "/images/json", query, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 45ac2aa6c..b9d2a538a 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -6,8 +6,8 @@ import ( "path/filepath" "strings" - "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/system" + "github.com/moby/sys/userns" "github.com/pkg/errors" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 035160c83..8d2c8857f 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -290,7 +290,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, } // Stream is an io.Writer for output with utilities to get the output's file -// descriptor and to detect wether it's a terminal. +// descriptor and to detect whether it's a terminal. // // it is subset of the streams.Out type in // https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out diff --git a/vendor/github.com/docker/docker/pkg/meminfo/meminfo.go b/vendor/github.com/docker/docker/pkg/meminfo/meminfo.go index 4f33ad26b..3d095579e 100644 --- a/vendor/github.com/docker/docker/pkg/meminfo/meminfo.go +++ b/vendor/github.com/docker/docker/pkg/meminfo/meminfo.go @@ -1,4 +1,4 @@ -// Package meminfo provides utilites to retrieve memory statistics of +// Package meminfo provides utilities to retrieve memory statistics of // the host system. package meminfo diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go index 3792c67a9..3ea3012b1 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -124,7 +124,7 @@ func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { } // NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. +// into the pool and closes the writer if it's an io.WriteCloser. 
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { return ioutils.NewWriteCloserWrapper(w, func() error { buf.Flush() diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index facfbb312..b877ecc5a 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -6,7 +6,7 @@ import ( // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. -// It will returns a nil slice and nil error if the xattr is not set. +// It returns a nil slice and nil error if the xattr is not set. func Lgetxattr(path string, attr string) ([]byte, error) { sysErr := func(err error) ([]byte, error) { return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err} diff --git a/vendor/github.com/docker/go-plugins-helpers/sdk/spec_file_generator.go b/vendor/github.com/docker/go-plugins-helpers/sdk/spec_file_generator.go index bc8cfc644..c9cdbb2f4 100644 --- a/vendor/github.com/docker/go-plugins-helpers/sdk/spec_file_generator.go +++ b/vendor/github.com/docker/go-plugins-helpers/sdk/spec_file_generator.go @@ -2,7 +2,6 @@ package sdk import ( "fmt" - "io/ioutil" "os" "path/filepath" ) @@ -50,7 +49,7 @@ func writeSpecFile(name, address, pluginSpecDir string, proto protocol) (string, specFileDir := filepath.Join(pluginSpecDir, name+".spec") url := string(proto) + "://" + address - if err := ioutil.WriteFile(specFileDir, []byte(url), 0644); err != nil { + if err := os.WriteFile(specFileDir, []byte(url), 0644); err != nil { return "", err } diff --git a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener.go b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener.go index 54b9a6d31..00bc911f6 100644 --- a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener.go +++ b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener.go @@ -1,4 +1,4 @@ -// +build linux freebsd +//go:build linux || freebsd package sdk diff --git a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_nosystemd.go b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_nosystemd.go index a798b8722..1a9247b0f 100644 --- a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_nosystemd.go +++ b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_nosystemd.go @@ -1,10 +1,10 @@ -// +build linux freebsd -// +build nosystemd +//go:build (linux || freebsd) && nosystemd package sdk import "net" +// FIXME(thaJeztah): this code was added in https://github.com/docker/go-plugins-helpers/commit/008703b825c10311af1840deeaf5f4769df7b59e, but is not used anywhere func setupSocketActivation() (net.Listener, error) { return nil, nil } diff --git a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_systemd.go b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_systemd.go index 5d5d8f427..59c53bf11 100644 --- a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_systemd.go +++ b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_systemd.go @@ -1,5 +1,4 @@ -// +build linux freebsd -// +build !nosystemd +//go:build (linux || freebsd) && !nosystemd package sdk @@ -25,6 +24,7 @@ func isRunningSystemd() bool { return fi.IsDir() } +// FIXME(thaJeztah): this code was added in https://github.com/docker/go-plugins-helpers/commit/008703b825c10311af1840deeaf5f4769df7b59e, but is not 
used anywhere func setupSocketActivation() (net.Listener, error) { if !isRunningSystemd() { return nil, nil diff --git a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_unsupported.go b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_unsupported.go index 344cf751b..a472f06ee 100644 --- a/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_unsupported.go +++ b/vendor/github.com/docker/go-plugins-helpers/sdk/unix_listener_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd +//go:build !linux && !freebsd package sdk @@ -7,10 +7,6 @@ import ( "net" ) -var ( - errOnlySupportedOnLinuxAndFreeBSD = errors.New("unix socket creation is only supported on Linux and FreeBSD") -) - func newUnixListener(pluginName string, gid int) (net.Listener, string, error) { - return nil, "", errOnlySupportedOnLinuxAndFreeBSD + return nil, "", errors.New("unix socket creation is only supported on Linux and FreeBSD") } diff --git a/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener.go b/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener.go index b5deaba6d..afb0f0b6b 100644 --- a/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener.go +++ b/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener.go @@ -1,4 +1,4 @@ -// +build windows +//go:build windows package sdk @@ -14,20 +14,19 @@ import ( // Named pipes use Windows Security Descriptor Definition Language to define ACL. Following are // some useful definitions. const ( - // This will set permissions for everyone to have full access + // AllowEveryone grants full access permissions for everyone. AllowEveryone = "S:(ML;;NW;;;LW)D:(A;;0x12019f;;;WD)" - // This will set permissions for Service, System, Adminstrator group and account to have full access + // AllowServiceSystemAdmin grants full access permissions for Service, System, Administrator group and account. 
AllowServiceSystemAdmin = "D:(A;ID;FA;;;SY)(A;ID;FA;;;BA)(A;ID;FA;;;LA)(A;ID;FA;;;LS)" ) func newWindowsListener(address, pluginName, daemonRoot string, pipeConfig *WindowsPipeConfig) (net.Listener, string, error) { - winioPipeConfig := winio.PipeConfig{ + listener, err := winio.ListenPipe(address, &winio.PipeConfig{ SecurityDescriptor: pipeConfig.SecurityDescriptor, InputBufferSize: pipeConfig.InBufferSize, OutputBufferSize: pipeConfig.OutBufferSize, - } - listener, err := winio.ListenPipe(address, &winioPipeConfig) + }) if err != nil { return nil, "", err } @@ -48,7 +47,7 @@ func newWindowsListener(address, pluginName, daemonRoot string, pipeConfig *Wind func windowsCreateDirectoryWithACL(name string) error { sa := syscall.SecurityAttributes{Length: 0} - sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + const sddl = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" sd, err := winio.SddlToSecurityDescriptor(sddl) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} diff --git a/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener_unsupported.go b/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener_unsupported.go index 0f5e113c1..1b187d0d8 100644 --- a/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener_unsupported.go +++ b/vendor/github.com/docker/go-plugins-helpers/sdk/windows_listener_unsupported.go @@ -1,4 +1,4 @@ -// +build !windows +//go:build !windows package sdk @@ -7,12 +7,8 @@ import ( "net" ) -var ( - errOnlySupportedOnWindows = errors.New("named pipe creation is only supported on Windows") -) - func newWindowsListener(address, pluginName, daemonRoot string, pipeConfig *WindowsPipeConfig) (net.Listener, string, error) { - return nil, "", errOnlySupportedOnWindows + return nil, "", errors.New("named pipe creation is only supported on Windows") } func windowsCreateDirectoryWithACL(name string) error { diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b..f4e7dbf37 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
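The windows_listener.go hunk above folds the winio.PipeConfig into the ListenPipe call instead of binding it to a temporary variable first. A minimal sketch of that same pattern against the github.com/Microsoft/go-winio API follows; the pipe path, SDDL string, and buffer sizes are illustrative assumptions, not values taken from this patch:

    //go:build windows

    package main

    import (
        "log"

        "github.com/Microsoft/go-winio"
    )

    func main() {
        // Hypothetical pipe path; the SDK derives the real one from the plugin name.
        listener, err := winio.ListenPipe(`\\.\pipe\example-plugin`, &winio.PipeConfig{
            // SDDL granting everyone full access, mirroring the AllowEveryone
            // constant documented in the hunk above.
            SecurityDescriptor: "S:(ML;;NW;;;LW)D:(A;;0x12019f;;;WD)",
            InputBufferSize:    4096,
            OutputBufferSize:   4096,
        })
        if err != nil {
            log.Fatal(err)
        }
        defer listener.Close()

        // Accept connections as with any net.Listener.
        conn, err := listener.Accept()
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }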
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b..daea9dd6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e575754..fa854785d 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ---------- -Nothing yet. +1.8.0 2024-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d..e4ac2a2ff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster.
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd..c349c326c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. 
moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. 
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. 
If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e4..36c311694 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
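The cookies field introduced above deserves a note: inotify pairs MOVED_FROM/MOVED_TO events via a cookie, but the MOVED_TO may never arrive (the file was moved out of the watched tree), so a map of pending cookies would leak. A ten-entry ring that overwrites the oldest slot gives a bounded, allocation-free cache instead. The same idea as a standalone sketch (ringCache and its methods are hypothetical names):

```go
package main

import "fmt"

type entry struct {
	cookie uint32
	path   string
}

// ringCache is a tiny fixed-size cache: put overwrites the oldest slot, so
// it never allocates and can never grow, unlike a map of pending cookies.
type ringCache struct {
	entries [10]entry
	next    uint8
}

func (c *ringCache) put(cookie uint32, path string) {
	c.entries[c.next] = entry{cookie, path}
	c.next = (c.next + 1) % uint8(len(c.entries))
}

func (c *ringCache) get(cookie uint32) (string, bool) {
	// A linear scan over ten entries is cheap; at this size it beats a map.
	for _, e := range c.entries {
		if e.cookie == cookie {
			return e.path, true
		}
	}
	return "", false
}

func main() {
	var c ringCache
	c.put(42, "/tmp/old-name")
	if p, ok := c.get(42); ok {
		fmt.Println("renamed from:", p)
	}
}
```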
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
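removePath now returns every watch descriptor registered at or below a recursive watch so the caller can deregister each one; the heart of it is a lexical prefix sweep over the path map. A sketch of the same operation, anchoring the prefix at a path separator, a detail worth checking whenever paths are compared lexically (otherwise removing /tmp/foo would also claim /tmp/foobar):

```go
package main

import (
	"fmt"
	"strings"
)

// removeTree deletes path and everything registered beneath it from a
// path → watch-descriptor map, returning the descriptors that were removed.
func removeTree(watches map[string]uint32, path string) []uint32 {
	wds := make([]uint32, 0, 8)
	if wd, ok := watches[path]; ok {
		delete(watches, path)
		wds = append(wds, wd)
	}
	prefix := path + "/" // Anchor at a separator so /tmp/foobar is excluded.
	for p, wd := range watches {
		if strings.HasPrefix(p, prefix) {
			delete(watches, p) // Deleting during range is safe in Go.
			wds = append(wds, wd)
		}
	}
	return wds
}

func main() {
	w := map[string]uint32{"/tmp/foo": 1, "/tmp/foo/bar": 2, "/tmp/foobar": 3}
	fmt.Println(removeTree(w, "/tmp/foo")) // [1 2]
	fmt.Println(w)                         // map[/tmp/foobar:3]
}
```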
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
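The `/...` recursion in AddWith above is still behind unexported options, so applications on released fsnotify versions usually emulate it exactly the way this hunk does internally: walk the tree with filepath.WalkDir and add a watch per directory, then repeat for directories reported by later Create events. A sketch against the public API only:

```go
package main

import (
	"io/fs"
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchRecursive adds a watch on root and every directory below it. Like
// inotify itself this is racy: directories created while walking may be
// missed until their Create event is handled.
func watchRecursive(w *fsnotify.Watcher, root string) error {
	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return w.Add(path)
		}
		return nil
	})
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := watchRecursive(w, "/tmp"); err != nil {
		log.Fatal(err)
	}
	for ev := range w.Events {
		log.Println(ev)
		// A complete implementation would also call watchRecursive on
		// directories reported by Create events.
	}
}
```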
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } + } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path in between reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here.
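readEvents above walks a byte buffer of variable-length records: a fixed-size inotify header followed by raw.Len bytes of NUL-padded name, with the new next() closure centralizing the offset bump so every continue path advances identically. The traversal in miniature, over a made-up record format (a 4-byte length plus padded name) rather than unix.InotifyEvent, and trusting the producer the way the backend trusts the kernel:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// walkRecords iterates records of the form [4-byte little-endian length,
// NUL-padded name], a simplified stand-in for inotify's header + name layout.
func walkRecords(buf []byte) {
	var offset uint32
	for offset+4 <= uint32(len(buf)) {
		nameLen := binary.LittleEndian.Uint32(buf[offset:])
		next := func() { offset += 4 + nameLen } // One place to advance.

		name := buf[offset+4 : offset+4+nameLen]
		fmt.Printf("record: %q\n", bytes.TrimRight(name, "\x00"))
		next()
	}
}

func main() {
	var buf []byte
	for _, n := range []string{"foo", "bar.txt"} {
		name := make([]byte, 8) // Pad to 8 bytes, like the kernel pads names.
		copy(name, n)
		buf = binary.LittleEndian.AppendUint32(buf, 8)
		buf = append(buf, name...)
	}
	walkRecords(buf) // record: "foo", record: "bar.txt"
}
```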
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
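Under a recursive watch, renaming a directory leaves every child watch's stored path stale, so the loop above rewrites the old prefix to the new name in place. Isolated, with the watches map reduced to wd → path (types and names here are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// wd → watched path, as in the inotify backend's watches.wd map.
	watches := map[uint32]string{
		1: "/tmp/old",
		2: "/tmp/old/sub",
		3: "/tmp/old/sub/deep",
		4: "/tmp/other",
	}

	renamedFrom, renamedTo := "/tmp/old", "/tmp/new"
	for wd, path := range watches {
		if path == renamedTo || !strings.HasPrefix(path, renamedFrom) {
			continue
		}
		// Replace only the first occurrence: the prefix.
		watches[wd] = strings.Replace(path, renamedFrom, renamedTo, 1)
	}
	fmt.Println(watches)
	// map[1:/tmp/new 2:/tmp/new/sub 3:/tmp/new/sub/deep 4:/tmp/other]
}
```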
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a..d8de5ab76 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
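The old kqueue backend kept six loosely related maps directly on the Watcher, all behind one mutex. The rewrite that follows groups them into a watches type with its own RWMutex and purpose-built indexes: wd → watch, path → wd, parent directory → descriptors, plus the "seen" and user-added sets. Indexing the same data several ways gives O(1) answers to each question the event loop asks; a trimmed-down sketch of the shape:

```go
package main

import (
	"fmt"
	"path/filepath"
	"sync"
)

type registry struct {
	mu     sync.RWMutex
	byWd   map[int]string          // descriptor → path
	byPath map[string]int          // path → descriptor
	byDir  map[string]map[int]bool // dirname(path) → descriptors in it
}

func newRegistry() *registry {
	return &registry{
		byWd:   map[int]string{},
		byPath: map[string]int{},
		byDir:  map[string]map[int]bool{},
	}
}

func (r *registry) add(path string, wd int) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.byWd[wd] = path
	r.byPath[path] = wd
	dir := filepath.Dir(path)
	if r.byDir[dir] == nil {
		r.byDir[dir] = make(map[int]bool)
	}
	r.byDir[dir][wd] = true
}

// inDir answers "which watches live in this directory?" without scanning
// every watch, the question the kqueue event loop asks on directory writes.
func (r *registry) inDir(dir string) []string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	var paths []string
	for wd := range r.byDir[dir] {
		paths = append(paths, r.byWd[wd])
	}
	return paths
}

func main() {
	r := newRegistry()
	r.add("/tmp/a", 3)
	r.add("/tmp/b", 4)
	fmt.Println(r.inDir("/tmp")) // [/tmp/a /tmp/b] in some order
}
```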
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
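newKqueue's closepipe above is the classic self-pipe trick: the read end is registered on the queue, and Close() (next hunk) closes the write end, which makes a blocked Kevent() call return so the reader goroutine can exit; the added CloseOnExec calls simply stop the descriptors leaking into child processes. A minimal BSD/macOS-only demonstration of waking a kevent wait this way:

```go
//go:build darwin || freebsd || netbsd || openbsd || dragonfly

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		panic(err)
	}
	defer unix.Close(kq)

	// Self-pipe: watch the read end; closing the write end wakes Kevent.
	var p [2]int
	if err := unix.Pipe(p[:]); err != nil {
		panic(err)
	}
	unix.CloseOnExec(p[0])
	unix.CloseOnExec(p[1])

	ev := make([]unix.Kevent_t, 1)
	unix.SetKevent(&ev[0], p[0], unix.EVFILT_READ, unix.EV_ADD)
	if _, err := unix.Kevent(kq, ev, nil, nil); err != nil {
		panic(err)
	}

	go func() {
		time.Sleep(100 * time.Millisecond)
		unix.Close(p[1]) // The "quit message".
	}()

	events := make([]unix.Kevent_t, 1)
	n, err := unix.Kevent(kq, nil, events, nil) // Blocks until woken.
	fmt.Println(n, err)                         // 1 <nil>
}
```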
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
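The addWatch hunk that follows keeps its retry loop around unix.Open: on macOS open() can return EINTR in practice (see the Go issues referenced in the code), and raw syscalls, unlike os.Open, are not retried for you. The pattern on its own, using plain O_RDONLY where the backend uses its platform-specific openMode:

```go
//go:build unix

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// openRetry opens path read-only, retrying when the syscall is interrupted
// by a signal before it could complete.
func openRetry(path string) (int, error) {
	for {
		fd, err := unix.Open(path, unix.O_RDONLY, 0)
		if err == nil {
			return fd, nil
		}
		if !errors.Is(err, unix.EINTR) {
			return -1, err
		}
		// EINTR: a signal arrived mid-syscall; just try again.
	}
}

func main() {
	fd, err := openRetry("/tmp")
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	fmt.Println("fd:", fd)
}
```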
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + rm -rf, the parent directory might have gone + missing, ignore the missing directory and let the + upcoming delete event remove the watch from the + parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ } } // newEvent returns a platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file.
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c01..5eb5dbc66 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
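Reviewer note on the kqueue hunks above: register() is a thin wrapper over the raw kevent(2) change-list API. A minimal standalone sketch of the same EV_ADD|EV_CLEAR|EV_ENABLE registration for a single descriptor, under the assumption of an already-open kqueue fd (watchVnode and the package name are illustrative, not part of this diff):

```go
//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package sketch

import "golang.org/x/sys/unix"

// watchVnode registers one open file descriptor with an existing kqueue for
// vnode events, mirroring what register() does above for a batch of fds.
func watchVnode(kq, fd int, fflags uint32) error {
	var ev unix.Kevent_t
	// SetKevent converts the ints to the platform-specific field types.
	unix.SetKevent(&ev, fd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE)
	ev.Fflags = fflags // e.g. unix.NOTE_WRITE | unix.NOTE_DELETE
	// Submit the change list only; a nil events slice means nothing is
	// received on this call.
	_, err := unix.Kevent(kq, []unix.Kevent_t{ev}, nil, nil)
	return err
}
```

EV_CLEAR is the load-bearing flag: it resets the event state after each retrieval, giving the edge-triggered behaviour the backend's read loop relies on.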
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
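The removed doc above points at the dedup example in cmd/fsnotify for coalescing bursts of Write events. A minimal sketch of that idea, assuming a caller-supplied handle callback (debounceWrites is illustrative, not a library function):

```go
package sketch

import (
	"time"

	"github.com/fsnotify/fsnotify"
)

// debounceWrites calls handle(name) once a path has produced no further
// Write events for the duration d, coalescing bursty writes into one call.
func debounceWrites(w *fsnotify.Watcher, d time.Duration, handle func(string)) {
	timers := map[string]*time.Timer{}
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		if t, ok := timers[ev.Name]; ok {
			t.Reset(d) // still being written; push the deadline back
			continue
		}
		name := ev.Name
		timers[name] = time.AfterFunc(d, func() { handle(name) })
	}
}
```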
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d6..c54a63083 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
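AddWith and the WithBufferSize option documented in this hunk are the public knobs for the Windows buffer, and the new readDirChangesW.AddWith above rejects sizes below 4096 bytes. A usage sketch (addWithBigBuffer is a hypothetical helper):

```go
package sketch

import "github.com/fsnotify/fsnotify"

// addWithBigBuffer watches dir with a 1 MiB ReadDirectoryChangesW buffer
// instead of the 64K default. WithBufferSize is a no-op on non-Windows
// platforms, and AddWith errors on values below 4096 bytes.
func addWithBigBuffer(w *fsnotify.Watcher, dir string) error {
	return w.AddWith(dir, fsnotify.WithBufferSize(1<<20))
}
```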
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
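The "Watching files" guidance above recommends watching the parent directory and filtering on Event.Name, since editors that replace files atomically silently kill a watch on the file itself. A sketch of that pattern, assuming this goroutine is the only consumer of w.Events (watchFile and onChange are illustrative names):

```go
package sketch

import (
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchFile watches the directory containing path and reacts only to events
// for path itself, surviving atomic replace-by-rename from editors.
func watchFile(w *fsnotify.Watcher, path string, onChange func(string)) error {
	if err := w.Add(filepath.Dir(path)); err != nil {
		return err
	}
	go func() {
		// Assumes no other goroutine reads w.Events.
		for ev := range w.Events {
			if ev.Name == path && (ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create)) {
				onChange(path)
			}
		}
	}()
	return nil
}
```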
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc49..0760efe91 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
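The inotify limits described in this new package doc can also be inspected programmatically, since they are plain /proc files. A small Linux-only sketch (maxUserWatches is a hypothetical helper, not part of fsnotify):

```go
package sketch

import (
	"bytes"
	"os"
	"strconv"
)

// maxUserWatches reads the per-user inotify watch limit discussed above.
func maxUserWatches() (int, error) {
	b, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(string(bytes.TrimSpace(b)))
}
```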
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
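NewWatcher above now just wires the platform backend to the shared Events/Errors channels. A minimal end-to-end usage sketch of the API as assembled here (the watched path and logging choices are illustrative):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // channel closed by Close()
				return
			}
			log.Println(ev) // e.g. CREATE "/tmp/file"
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```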
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 000000000..b0eab1009 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 000000000..928319fb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 000000000..3186b0c34 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 000000000..f69fdb930 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 000000000..607e683bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 000000000..35c734be4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 000000000..e5b3b6f69 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 000000000..1dd455bc5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 000000000..f1b2e73bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 000000000..52bf4ce53 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 000000000..547df1df8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 000000000..7daa45e19 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 000000000..30976ce97 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 000000000..37dfeddc2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 000000000..a72c64954 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae65..000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b88..f65e8fe3e 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78..a29fc7aab 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index 0eea041e4..e5dafe0fa 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -318,8 +318,9 @@ type VolumeOptions struct { // TempfsOptions contains optional configuration for the tempfs type type TempfsOptions struct { - SizeBytes int64 `json:"SizeBytes,omitempty" yaml:"SizeBytes,omitempty" toml:"SizeBytes,omitempty"` - Mode int `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` + SizeBytes int64 `json:"SizeBytes,omitempty" yaml:"SizeBytes,omitempty" toml:"SizeBytes,omitempty"` + Mode int `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` + Options [][]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"` } // VolumeDriverConfig holds a map of volume 
driver specific options diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md index 28bdd2fc0..6f717dbd8 100644 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -1,3 +1,27 @@ +# v4.0.4 + +## Fixed + + - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a + breaking change. See #136 / #137. + +# v4.0.3 + +## Changed + + - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) + - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) + - Dependency updates + +# v4.0.2 + +## Changed + + - Improved documentation of Verify() to note that JSONWebKeySet is a supported + argument type (#104) + - Defined exported error values for missing x5c header and unsupported elliptic + curves error cases (#117) + # v4.0.1 ## Fixed diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go index aba08424c..d81b03b44 100644 --- a/vendor/github.com/go-jose/go-jose/v4/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return nil, err + } decrypter, err := newDecrypter(key) if err != nil { return nil, err @@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return -1, Header{}, nil, err + } decrypter, err := newDecrypter(key) if err != nil { return -1, Header{}, nil, err diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index a565aaab2..8a5284210 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) { return key.K.bytes(), nil } -func tryJWKS(key interface{}, headers ...Header) interface{} { +var ( + // ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a + // key ID which matches one in the provided tokens headers. + ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set") +) + +func tryJWKS(key interface{}, headers ...Header) (interface{}, error) { var jwks JSONWebKeySet switch jwksType := key.(type) { @@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { case JSONWebKeySet: jwks = jwksType default: - return key + // If the specified key is not a JWKS, return as is. + return key, nil } + // Determine the KID to search for from the headers. var kid string for _, header := range headers { if header.KeyID != "" { @@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { } } + // If no KID is specified in the headers, reject. if kid == "" { - return key + return nil, ErrJWKSKidNotFound } + // Find the JWK with the matching KID. If no JWK with the specified KID is + // found, reject. 
keys := jwks.Key(kid) if len(keys) == 0 { - return key + return nil, ErrJWKSKidNotFound } - return keys[0].Key + return keys[0].Key, nil } diff --git a/vendor/github.com/go-jose/go-jose/v4/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go index 68db085ef..429427232 100644 --- a/vendor/github.com/go-jose/go-jose/v4/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v4/opaque.go @@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig } // OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key. +// +// Note: this cannot currently be implemented outside this package because of its +// unexported method. type OpaqueKeyEncrypter interface { // KeyID returns the kid KeyID() string diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go index 46c9a4d96..3dec0112b 100644 --- a/vendor/github.com/go-jose/go-jose/v4/signing.go +++ b/vendor/github.com/go-jose/go-jose/v4/signing.go @@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return err + } verifier, err := newVerifier(key) if err != nil { return err @@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return -1, Signature{}, err + } verifier, err := newVerifier(key) if err != nil { return -1, Signature{}, err diff --git a/vendor/github.com/godbus/dbus/v5/.cirrus.yml b/vendor/github.com/godbus/dbus/v5/.cirrus.yml index 4e900f86d..75a0d798a 100644 --- a/vendor/github.com/godbus/dbus/v5/.cirrus.yml +++ b/vendor/github.com/godbus/dbus/v5/.cirrus.yml @@ -1,10 +1,10 @@ freebsd_instance: - image_family: freebsd-13-0 + image_family: freebsd-14-0 task: name: Test on FreeBSD - install_script: pkg install -y go119 dbus + install_script: pkg install -y go122 dbus test_script: | /usr/local/etc/rc.d/dbus onestart && \ eval `dbus-launch --sh-syntax` && \ - go119 test -v ./... + go122 test -v ./... diff --git a/vendor/github.com/godbus/dbus/v5/README.md b/vendor/github.com/godbus/dbus/v5/README.md index 5c6b19655..da848a98d 100644 --- a/vendor/github.com/godbus/dbus/v5/README.md +++ b/vendor/github.com/godbus/dbus/v5/README.md @@ -14,7 +14,7 @@ D-Bus message bus system. ### Installation -This packages requires Go 1.12 or later. It can be installed by running the command below: +This packages requires Go 1.20 or later. 
It can be installed by running the command below: ``` go get github.com/godbus/dbus/v5 diff --git a/vendor/github.com/godbus/dbus/v5/conn_other.go b/vendor/github.com/godbus/dbus/v5/conn_other.go index 067e67cc5..1e9959446 100644 --- a/vendor/github.com/godbus/dbus/v5/conn_other.go +++ b/vendor/github.com/godbus/dbus/v5/conn_other.go @@ -7,7 +7,6 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" "os/exec" "os/user" @@ -61,7 +60,7 @@ func tryDiscoverDbusSessionBusAddress() string { // text file // containing the address of the socket, e.g.: // DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-E1c73yNqrG - if f, err := ioutil.ReadFile(runUserSessionDbusFile); err == nil { + if f, err := os.ReadFile(runUserSessionDbusFile); err == nil { fileContent := string(f) prefix := "DBUS_SESSION_BUS_ADDRESS=" diff --git a/vendor/github.com/godbus/dbus/v5/decoder.go b/vendor/github.com/godbus/dbus/v5/decoder.go index 97a827b83..d03bdd8e8 100644 --- a/vendor/github.com/godbus/dbus/v5/decoder.go +++ b/vendor/github.com/godbus/dbus/v5/decoder.go @@ -370,12 +370,6 @@ func (c *stringConverter) String(b []byte) string { } // toString converts a byte slice to a string without allocating. -// Starting from Go 1.20 you should use unsafe.String. func toString(b []byte) string { - var s string - h := (*reflect.StringHeader)(unsafe.Pointer(&s)) - h.Data = uintptr(unsafe.Pointer(&b[0])) - h.Len = len(b) - - return s + return unsafe.String(&b[0], len(b)) } diff --git a/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go b/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go index a61a82084..a08d0891c 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go +++ b/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go @@ -5,8 +5,8 @@ package dbus import ( "errors" - "io/ioutil" "net" + "os" ) func init() { @@ -28,7 +28,7 @@ func newNonceTcpTransport(keys string) (transport, error) { if err != nil { return nil, err } - b, err := ioutil.ReadFile(noncefile) + b, err := os.ReadFile(noncefile) if err != nil { return nil, err } diff --git a/vendor/github.com/kevinburke/ssh_config/.gitattributes b/vendor/github.com/kevinburke/ssh_config/.gitattributes new file mode 100644 index 000000000..44db58188 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/.gitattributes @@ -0,0 +1 @@ +testdata/dos-lines eol=crlf diff --git a/vendor/github.com/kevinburke/ssh_config/.gitignore b/vendor/github.com/kevinburke/ssh_config/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/kevinburke/ssh_config/.mailmap b/vendor/github.com/kevinburke/ssh_config/.mailmap new file mode 100644 index 000000000..253406b1c --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/.mailmap @@ -0,0 +1 @@ +Kevin Burke Kevin Burke diff --git a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt new file mode 100644 index 000000000..311aeb1b4 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt @@ -0,0 +1,9 @@ +Carlos A Becker +Dustin Spicuzza +Eugene Terentev +Kevin Burke +Mark Nevill +Scott Lessans +Sergey Lukjanov +Wayne Ashley Berry +santosh653 <70637961+santosh653@users.noreply.github.com> diff --git a/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md b/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md new file mode 100644 index 000000000..d32a3f510 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md @@ -0,0 +1,19 @@ +# Changes + +## Version 1.2 + +Previously, if a Host declaration or a value 
had trailing whitespace, that +whitespace would have been included as part of the value. This led to unexpected +consequences. For example: + +``` +Host example # A comment + HostName example.com # Another comment +``` + +Prior to version 1.2, the value for Host would have been "example " and the +value for HostName would have been "example.com ". Both of these are +unintuitive. + +Instead, we strip the trailing whitespace in the configuration, which leads to +more intuitive behavior. diff --git a/vendor/github.com/kevinburke/ssh_config/LICENSE b/vendor/github.com/kevinburke/ssh_config/LICENSE new file mode 100644 index 000000000..b9a770ac2 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/LICENSE @@ -0,0 +1,49 @@ +Copyright (c) 2017 Kevin Burke. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +=================== + +The lexer and parser borrow heavily from github.com/pelletier/go-toml. The +license for that project is copied below. + +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
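A minimal sketch of the v1.2 trailing-whitespace behavior described in the changelog above, using only the exported `Decode` and `Config.Get` APIs that appear in the vendored `config.go` later in this diff:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/kevinburke/ssh_config"
)

func main() {
	// Since v1.2, trailing whitespace before an end-of-line comment is
	// stripped from Host patterns and values instead of being kept as
	// part of the value.
	cfg, err := ssh_config.Decode(strings.NewReader(
		"Host example # A comment\n" +
			"    HostName example.com # Another comment\n"))
	if err != nil {
		panic(err)
	}
	host, err := cfg.Get("example", "HostName")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", host) // "example.com", not "example.com "
}
```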
diff --git a/vendor/github.com/kevinburke/ssh_config/Makefile b/vendor/github.com/kevinburke/ssh_config/Makefile new file mode 100644 index 000000000..df7ee728b --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/Makefile @@ -0,0 +1,33 @@ +BUMP_VERSION := $(GOPATH)/bin/bump_version +STATICCHECK := $(GOPATH)/bin/staticcheck +WRITE_MAILMAP := $(GOPATH)/bin/write_mailmap + +$(STATICCHECK): + go get honnef.co/go/tools/cmd/staticcheck + +lint: $(STATICCHECK) + go vet ./... + $(STATICCHECK) + +test: lint + @# the timeout helps guard against infinite recursion + go test -timeout=250ms ./... + +race-test: lint + go test -timeout=500ms -race ./... + +$(BUMP_VERSION): + go get -u github.com/kevinburke/bump_version + +$(WRITE_MAILMAP): + go get -u github.com/kevinburke/write_mailmap + +release: test | $(BUMP_VERSION) + $(BUMP_VERSION) --tag-prefix=v minor config.go + +force: ; + +AUTHORS.txt: force | $(WRITE_MAILMAP) + $(WRITE_MAILMAP) > AUTHORS.txt + +authors: AUTHORS.txt diff --git a/vendor/github.com/kevinburke/ssh_config/README.md b/vendor/github.com/kevinburke/ssh_config/README.md new file mode 100644 index 000000000..f14b2168f --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/README.md @@ -0,0 +1,92 @@ +# ssh_config + +This is a Go parser for `ssh_config` files. Importantly, this parser attempts +to preserve comments in a given file, so you can manipulate a `ssh_config` file +from a program, if your heart desires. + +It's designed to be used with the excellent +[x/crypto/ssh](https://golang.org/x/crypto/ssh) package, which handles SSH +negotiation but isn't very easy to configure. + +The `ssh_config` `Get()` and `GetStrict()` functions will attempt to read values +from `$HOME/.ssh/config` and fall back to `/etc/ssh/ssh_config`. The first +argument is the host name to match on, and the second argument is the key you +want to retrieve. + +```go +port := ssh_config.Get("myhost", "Port") +``` + +Certain directives can occur multiple times for a host (such as `IdentityFile`), +so you should use the `GetAll` or `GetAllStrict` directive to retrieve those +instead. + +```go +files := ssh_config.GetAll("myhost", "IdentityFile") +``` + +You can also load a config file and read values from it. + +```go +var config = ` +Host *.test + Compression yes +` + +cfg, err := ssh_config.Decode(strings.NewReader(config)) +fmt.Println(cfg.Get("example.test", "Port")) +``` + +Some SSH arguments have default values - for example, the default value for +`KeyboardAuthentication` is `"yes"`. If you call Get(), and no value for the +given Host/keyword pair exists in the config, we'll return a default for the +keyword if one exists. + +### Manipulating SSH config files + +Here's how you can manipulate an SSH config file, and then write it back to +disk. + +```go +f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config")) +cfg, _ := ssh_config.Decode(f) +for _, host := range cfg.Hosts { + fmt.Println("patterns:", host.Patterns) + for _, node := range host.Nodes { + // Manipulate the nodes as you see fit, or use a type switch to + // distinguish between Empty, KV, and Include nodes. + fmt.Println(node.String()) + } +} + +// Print the config to stdout: +fmt.Println(cfg.String()) +``` + +## Spec compliance + +Wherever possible we try to implement the specification as documented in +the `ssh_config` manpage. Unimplemented features should be present in the +[issues][issues] list. + +Notably, the `Match` directive is currently unsupported. 
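A short sketch of the lookup API documented in the README above: `Get` returns only the first matching value, so repeatable directives such as `IdentityFile` need `GetAll`, and unset keywords fall back to OpenSSH defaults. The output depends on the local `~/.ssh/config`, which is an assumption here:

```go
package main

import (
	"fmt"

	"github.com/kevinburke/ssh_config"
)

func main() {
	// Get returns only the first matching value; repeatable directives
	// such as IdentityFile must be read with GetAll.
	first := ssh_config.Get("myhost", "IdentityFile")
	all := ssh_config.GetAll("myhost", "IdentityFile")
	fmt.Println("first:", first)
	fmt.Println("all:", all)

	// Keywords with no matching configuration fall back to OpenSSH
	// defaults, e.g. Port defaults to "22".
	fmt.Println("port:", ssh_config.Get("myhost", "Port"))
}
```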
+ +[issues]: https://github.com/kevinburke/ssh_config/issues + +## Errata + +This is the second [comment-preserving configuration parser][blog] I've written, after +[an /etc/hosts parser][hostsfile]. Eventually, I will write one for every Linux +file format. + +[blog]: https://kev.inburke.com/kevin/more-comment-preserving-configuration-parsers/ +[hostsfile]: https://github.com/kevinburke/hostsfile + +## Donating + +I don't get paid to maintain this project. Donations free up time to make +improvements to the library, and respond to bug reports. You can send donations +via Paypal's "Send Money" feature to kev@inburke.com. Donations are not tax +deductible in the USA. + +You can also reach out about a consulting engagement: https://burke.services diff --git a/vendor/github.com/kevinburke/ssh_config/config.go b/vendor/github.com/kevinburke/ssh_config/config.go new file mode 100644 index 000000000..00d815c1a --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/config.go @@ -0,0 +1,803 @@ +// Package ssh_config provides tools for manipulating SSH config files. +// +// Importantly, this parser attempts to preserve comments in a given file, so +// you can manipulate a `ssh_config` file from a program, if your heart desires. +// +// The Get() and GetStrict() functions will attempt to read values from +// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. The first argument is +// the host name to match on ("example.com"), and the second argument is the key +// you want to retrieve ("Port"). The keywords are case insensitive. +// +// port := ssh_config.Get("myhost", "Port") +// +// You can also manipulate an SSH config file and then print it or write it back +// to disk. +// +// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config")) +// cfg, _ := ssh_config.Decode(f) +// for _, host := range cfg.Hosts { +// fmt.Println("patterns:", host.Patterns) +// for _, node := range host.Nodes { +// fmt.Println(node.String()) +// } +// } +// +// // Write the cfg back to disk: +// fmt.Println(cfg.String()) +// +// BUG: the Match directive is currently unsupported; parsing a config with +// a Match directive will trigger an error. +package ssh_config + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + osuser "os/user" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" +) + +const version = "1.2" + +var _ = version + +type configFinder func() string + +// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config +// files are parsed and cached the first time Get() or GetStrict() is called. +type UserSettings struct { + IgnoreErrors bool + systemConfig *Config + systemConfigFinder configFinder + userConfig *Config + userConfigFinder configFinder + loadConfigs sync.Once + onceErr error +} + +func homedir() string { + user, err := osuser.Current() + if err == nil { + return user.HomeDir + } else { + return os.Getenv("HOME") + } +} + +func userConfigFinder() string { + return filepath.Join(homedir(), ".ssh", "config") +} + +// DefaultUserSettings is the default UserSettings and is used by Get and +// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys, +// and it will return parse errors (if any) instead of swallowing them. 
+var DefaultUserSettings = &UserSettings{ + IgnoreErrors: false, + systemConfigFinder: systemConfigFinder, + userConfigFinder: userConfigFinder, +} + +func systemConfigFinder() string { + return filepath.Join("/", "etc", "ssh", "ssh_config") +} + +func findVal(c *Config, alias, key string) (string, error) { + if c == nil { + return "", nil + } + val, err := c.Get(alias, key) + if err != nil || val == "" { + return "", err + } + if err := validate(key, val); err != nil { + return "", err + } + return val, nil +} + +func findAll(c *Config, alias, key string) ([]string, error) { + if c == nil { + return nil, nil + } + return c.GetAll(alias, key) +} + +// Get finds the first value for key within a declaration that matches the +// alias. Get returns the empty string if no value was found, or if IgnoreErrors +// is false and we could not parse the configuration file. Use GetStrict to +// disambiguate the latter cases. +// +// The match for key is case insensitive. +// +// Get is a wrapper around DefaultUserSettings.Get. +func Get(alias, key string) string { + return DefaultUserSettings.Get(alias, key) +} + +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetAllStrict to disambiguate the +// latter cases. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The match for key is case insensitive. +// +// GetAll is a wrapper around DefaultUserSettings.GetAll. +func GetAll(alias, key string) []string { + return DefaultUserSettings.GetAll(alias, key) +} + +// GetStrict finds the first value for key within a declaration that matches the +// alias. If key has a default value and no matching configuration is found, the +// default will be returned. For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +// +// GetStrict is a wrapper around DefaultUserSettings.GetStrict. +func GetStrict(alias, key string) (string, error) { + return DefaultUserSettings.GetStrict(alias, key) +} + +// GetAllStrict retrieves zero or more directives for key for the given alias. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +// +// GetAllStrict is a wrapper around DefaultUserSettings.GetAllStrict. +func GetAllStrict(alias, key string) ([]string, error) { + return DefaultUserSettings.GetAllStrict(alias, key) +} + +// Get finds the first value for key within a declaration that matches the +// alias. Get returns the empty string if no value was found, or if IgnoreErrors +// is false and we could not parse the configuration file. Use GetStrict to +// disambiguate the latter cases. +// +// The match for key is case insensitive. 
+func (u *UserSettings) Get(alias, key string) string { + val, err := u.GetStrict(alias, key) + if err != nil { + return "" + } + return val +} + +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetStrict to disambiguate the latter +// cases. +// +// The match for key is case insensitive. +func (u *UserSettings) GetAll(alias, key string) []string { + val, _ := u.GetAllStrict(alias, key) + return val +} + +// GetStrict finds the first value for key within a declaration that matches the +// alias. If key has a default value and no matching configuration is found, the +// default will be returned. For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. +// +// error will be non-nil if and only if a user's configuration file or the +// system configuration file could not be parsed, and u.IgnoreErrors is false. +func (u *UserSettings) GetStrict(alias, key string) (string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return "", u.onceErr + } + val, err := findVal(u.userConfig, alias, key) + if err != nil || val != "" { + return val, err + } + val2, err2 := findVal(u.systemConfig, alias, key) + if err2 != nil || val2 != "" { + return val2, err2 + } + return Default(key), nil +} + +// GetAllStrict retrieves zero or more directives for key for the given alias. +// If key has a default value and no matching configuration is found, the +// default will be returned. For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +func (u *UserSettings) GetAllStrict(alias, key string) ([]string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return nil, u.onceErr + } + val, err := findAll(u.userConfig, alias, key) + if err != nil || val != nil { + return val, err + } + val2, err2 := findAll(u.systemConfig, alias, key) + if err2 != nil || val2 != nil { + return val2, err2 + } + // TODO: IdentityFile has multiple default values that we should return. + if def := Default(key); def != "" { + return []string{def}, nil + } + return []string{}, nil +} + +func (u *UserSettings) doLoadConfigs() { + u.loadConfigs.Do(func() { + // can't parse user file, that's ok. 
+ var filename string + if u.userConfigFinder == nil { + filename = userConfigFinder() + } else { + filename = u.userConfigFinder() + } + var err error + u.userConfig, err = parseFile(filename) + //lint:ignore S1002 I prefer it this way + if err != nil && os.IsNotExist(err) == false { + u.onceErr = err + return + } + if u.systemConfigFinder == nil { + filename = systemConfigFinder() + } else { + filename = u.systemConfigFinder() + } + u.systemConfig, err = parseFile(filename) + //lint:ignore S1002 I prefer it this way + if err != nil && os.IsNotExist(err) == false { + u.onceErr = err + return + } + }) +} + +func parseFile(filename string) (*Config, error) { + return parseWithDepth(filename, 0) +} + +func parseWithDepth(filename string, depth uint8) (*Config, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + return decodeBytes(b, isSystem(filename), depth) +} + +func isSystem(filename string) bool { + // TODO: not sure this is the best way to detect a system repo + return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh") +} + +// Decode reads r into a Config, or returns an error if r could not be parsed as +// an SSH config file. +func Decode(r io.Reader) (*Config, error) { + b, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return decodeBytes(b, false, 0) +} + +// DecodeBytes reads b into a Config, or returns an error if r could not be +// parsed as an SSH config file. +func DecodeBytes(b []byte) (*Config, error) { + return decodeBytes(b, false, 0) +} + +func decodeBytes(b []byte, system bool, depth uint8) (c *Config, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if e, ok := r.(error); ok && e == ErrDepthExceeded { + err = e + return + } + err = errors.New(r.(string)) + } + }() + + c = parseSSH(lexSSH(b), system, depth) + return c, err +} + +// Config represents an SSH config file. +type Config struct { + // A list of hosts to match against. The file begins with an implicit + // "Host *" declaration matching all hosts. + Hosts []*Host + depth uint8 + position Position +} + +// Get finds the first value in the configuration that matches the alias and +// contains key. Get returns the empty string if no value was found, or if the +// Config contains an invalid conditional Include value. +// +// The match for key is case insensitive. +func (c *Config) Get(alias, key string) (string, error) { + lowerKey := strings.ToLower(key) + for _, host := range c.Hosts { + if !host.Matches(alias) { + continue + } + for _, node := range host.Nodes { + switch t := node.(type) { + case *Empty: + continue + case *KV: + // "keys are case insensitive" per the spec + lkey := strings.ToLower(t.Key) + if lkey == "match" { + panic("can't handle Match directives") + } + if lkey == lowerKey { + return t.Value, nil + } + case *Include: + val := t.Get(alias, key) + if val != "" { + return val, nil + } + default: + return "", fmt.Errorf("unknown Node type %v", t) + } + } + } + return "", nil +} + +// GetAll returns all values in the configuration that match the alias and +// contains key, or nil if none are present. 
+func (c *Config) GetAll(alias, key string) ([]string, error) { + lowerKey := strings.ToLower(key) + all := []string(nil) + for _, host := range c.Hosts { + if !host.Matches(alias) { + continue + } + for _, node := range host.Nodes { + switch t := node.(type) { + case *Empty: + continue + case *KV: + // "keys are case insensitive" per the spec + lkey := strings.ToLower(t.Key) + if lkey == "match" { + panic("can't handle Match directives") + } + if lkey == lowerKey { + all = append(all, t.Value) + } + case *Include: + val, _ := t.GetAll(alias, key) + if len(val) > 0 { + all = append(all, val...) + } + default: + return nil, fmt.Errorf("unknown Node type %v", t) + } + } + } + + return all, nil +} + +// String returns a string representation of the Config file. +func (c Config) String() string { + return marshal(c).String() +} + +func (c Config) MarshalText() ([]byte, error) { + return marshal(c).Bytes(), nil +} + +func marshal(c Config) *bytes.Buffer { + var buf bytes.Buffer + for i := range c.Hosts { + buf.WriteString(c.Hosts[i].String()) + } + return &buf +} + +// Pattern is a pattern in a Host declaration. Patterns are read-only values; +// create a new one with NewPattern(). +type Pattern struct { + str string // Its appearance in the file, not the value that gets compiled. + regex *regexp.Regexp + not bool // True if this is a negated match +} + +// String prints the string representation of the pattern. +func (p Pattern) String() string { + return p.str +} + +// Copied from regexp.go with * and ? removed. +var specialBytes = []byte(`\.+()|[]{}^$`) + +func special(b byte) bool { + return bytes.IndexByte(specialBytes, b) >= 0 +} + +// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates +// a Pattern that matches all hosts. +// +// From the manpage, a pattern consists of zero or more non-whitespace +// characters, `*' (a wildcard that matches zero or more characters), or `?' (a +// wildcard that matches exactly one character). For example, to specify a set +// of declarations for any host in the ".co.uk" set of domains, the following +// pattern could be used: +// +// Host *.co.uk +// +// The following pattern would match any host in the 192.168.0.[0-9] network range: +// +// Host 192.168.0.? +func NewPattern(s string) (*Pattern, error) { + if s == "" { + return nil, errors.New("ssh_config: empty pattern") + } + negated := false + if s[0] == '!' { + negated = true + s = s[1:] + } + var buf bytes.Buffer + buf.WriteByte('^') + for i := 0; i < len(s); i++ { + // A byte loop is correct because all metacharacters are ASCII. + switch b := s[i]; b { + case '*': + buf.WriteString(".*") + case '?': + buf.WriteString(".?") + default: + // borrowing from QuoteMeta here. + if special(b) { + buf.WriteByte('\\') + } + buf.WriteByte(b) + } + } + buf.WriteByte('$') + r, err := regexp.Compile(buf.String()) + if err != nil { + return nil, err + } + return &Pattern{str: s, regex: r, not: negated}, nil +} + +// Host describes a Host directive and the keywords that follow it. +type Host struct { + // A list of host patterns that should match this host. + Patterns []*Pattern + // A Node is either a key/value pair or a comment line. + Nodes []Node + // EOLComment is the comment (if any) terminating the Host line. + EOLComment string + // Whitespace if any between the Host declaration and a trailing comment. + spaceBeforeComment string + + hasEquals bool + leadingSpace int // TODO: handle spaces vs tabs here. + // The file starts with an implicit "Host *" declaration. 
+ implicit bool +} + +// Matches returns true if the Host matches for the given alias. For +// a description of the rules that provide a match, see the manpage for +// ssh_config. +func (h *Host) Matches(alias string) bool { + found := false + for i := range h.Patterns { + if h.Patterns[i].regex.MatchString(alias) { + if h.Patterns[i].not { + // Negated match. "A pattern entry may be negated by prefixing + // it with an exclamation mark (`!'). If a negated entry is + // matched, then the Host entry is ignored, regardless of + // whether any other patterns on the line match. Negated matches + // are therefore useful to provide exceptions for wildcard + // matches." + return false + } + found = true + } + } + return found +} + +// String prints h as it would appear in a config file. Minor tweaks may be +// present in the whitespace in the printed file. +func (h *Host) String() string { + var buf strings.Builder + //lint:ignore S1002 I prefer to write it this way + if h.implicit == false { + buf.WriteString(strings.Repeat(" ", int(h.leadingSpace))) + buf.WriteString("Host") + if h.hasEquals { + buf.WriteString(" = ") + } else { + buf.WriteString(" ") + } + for i, pat := range h.Patterns { + buf.WriteString(pat.String()) + if i < len(h.Patterns)-1 { + buf.WriteString(" ") + } + } + buf.WriteString(h.spaceBeforeComment) + if h.EOLComment != "" { + buf.WriteByte('#') + buf.WriteString(h.EOLComment) + } + buf.WriteByte('\n') + } + for i := range h.Nodes { + buf.WriteString(h.Nodes[i].String()) + buf.WriteByte('\n') + } + return buf.String() +} + +// Node represents a line in a Config. +type Node interface { + Pos() Position + String() string +} + +// KV is a line in the config file that contains a key, a value, and possibly +// a comment. +type KV struct { + Key string + Value string + // Whitespace after the value but before any comment + spaceAfterValue string + Comment string + hasEquals bool + leadingSpace int // Space before the key. TODO handle spaces vs tabs. + position Position +} + +// Pos returns k's Position. +func (k *KV) Pos() Position { + return k.position +} + +// String prints k as it was parsed in the config file. +func (k *KV) String() string { + if k == nil { + return "" + } + equals := " " + if k.hasEquals { + equals = " = " + } + line := strings.Repeat(" ", int(k.leadingSpace)) + k.Key + equals + k.Value + k.spaceAfterValue + if k.Comment != "" { + line += "#" + k.Comment + } + return line +} + +// Empty is a line in the config file that contains only whitespace or comments. +type Empty struct { + Comment string + leadingSpace int // TODO handle spaces vs tabs. + position Position +} + +// Pos returns e's Position. +func (e *Empty) Pos() Position { + return e.position +} + +// String prints e as it was parsed in the config file. +func (e *Empty) String() string { + if e == nil { + return "" + } + if e.Comment == "" { + return "" + } + return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment) +} + +// Include holds the result of an Include directive, including the config files +// that have been parsed as part of that directive. At most 5 levels of Include +// statements will be parsed. +type Include struct { + // Comment is the contents of any comment at the end of the Include + // statement. 
+ Comment string + // an include directive can include several different files, and wildcards + directives []string + + mu sync.Mutex + // 1:1 mapping between matches and keys in files array; matches preserves + // ordering + matches []string + // actual filenames are listed here + files map[string]*Config + leadingSpace int + position Position + depth uint8 + hasEquals bool +} + +const maxRecurseDepth = 5 + +// ErrDepthExceeded is returned if too many Include directives are parsed. +// Usually this indicates a recursive loop (an Include directive pointing to the +// file it contains). +var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded") + +func removeDups(arr []string) []string { + // Use map to record duplicates as we find them. + encountered := make(map[string]bool, len(arr)) + result := make([]string, 0) + + for v := range arr { + //lint:ignore S1002 I prefer it this way + if encountered[arr[v]] == false { + encountered[arr[v]] = true + result = append(result, arr[v]) + } + } + return result +} + +// NewInclude creates a new Include with a list of file globs to include. +// Configuration files are parsed greedily (e.g. as soon as this function runs). +// Any error encountered while parsing nested configuration files will be +// returned. +func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) { + if depth > maxRecurseDepth { + return nil, ErrDepthExceeded + } + inc := &Include{ + Comment: comment, + directives: directives, + files: make(map[string]*Config), + position: pos, + leadingSpace: pos.Col - 1, + depth: depth, + hasEquals: hasEquals, + } + // no need for inc.mu.Lock() since nothing else can access this inc + matches := make([]string, 0) + for i := range directives { + var path string + if filepath.IsAbs(directives[i]) { + path = directives[i] + } else if system { + path = filepath.Join("/etc/ssh", directives[i]) + } else { + path = filepath.Join(homedir(), ".ssh", directives[i]) + } + theseMatches, err := filepath.Glob(path) + if err != nil { + return nil, err + } + matches = append(matches, theseMatches...) + } + matches = removeDups(matches) + inc.matches = matches + for i := range matches { + config, err := parseWithDepth(matches[i], depth) + if err != nil { + return nil, err + } + inc.files[matches[i]] = config + } + return inc, nil +} + +// Pos returns the position of the Include directive in the larger file. +func (i *Include) Pos() Position { + return i.position +} + +// Get finds the first value in the Include statement matching the alias and the +// given key. +func (inc *Include) Get(alias, key string) string { + inc.mu.Lock() + defer inc.mu.Unlock() + // TODO: we search files in any order which is not correct + for i := range inc.matches { + cfg := inc.files[inc.matches[i]] + if cfg == nil { + panic("nil cfg") + } + val, err := cfg.Get(alias, key) + if err == nil && val != "" { + return val + } + } + return "" +} + +// GetAll finds all values in the Include statement matching the alias and the +// given key. +func (inc *Include) GetAll(alias, key string) ([]string, error) { + inc.mu.Lock() + defer inc.mu.Unlock() + var vals []string + + // TODO: we search files in any order which is not correct + for i := range inc.matches { + cfg := inc.files[inc.matches[i]] + if cfg == nil { + panic("nil cfg") + } + val, err := cfg.GetAll(alias, key) + if err == nil && len(val) != 0 { + // In theory if SupportsMultiple was false for this key we could + // stop looking here. 
But the caller has asked us to find all + // instances of the keyword (and could use Get() if they wanted) so + // let's keep looking. + vals = append(vals, val...) + } + } + return vals, nil +} + +// String prints out a string representation of this Include directive. Note +// included Config files are not printed as part of this representation. +func (inc *Include) String() string { + equals := " " + if inc.hasEquals { + equals = " = " + } + line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " ")) + if inc.Comment != "" { + line += " #" + inc.Comment + } + return line +} + +var matchAll *Pattern + +func init() { + var err error + matchAll, err = NewPattern("*") + if err != nil { + panic(err) + } +} + +func newConfig() *Config { + return &Config{ + Hosts: []*Host{ + &Host{ + implicit: true, + Patterns: []*Pattern{matchAll}, + Nodes: make([]Node, 0), + }, + }, + depth: 0, + } +} diff --git a/vendor/github.com/kevinburke/ssh_config/lexer.go b/vendor/github.com/kevinburke/ssh_config/lexer.go new file mode 100644 index 000000000..11680b4c7 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/lexer.go @@ -0,0 +1,240 @@ +package ssh_config + +import ( + "bytes" +) + +// Define state functions +type sshLexStateFn func() sshLexStateFn + +type sshLexer struct { + inputIdx int + input []rune // Textual source + + buffer []rune // Runes composing the current token + tokens chan token + line int + col int + endbufferLine int + endbufferCol int +} + +func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn { + return func() sshLexStateFn { + growingString := "" + for next := s.peek(); next != '\n' && next != eof; next = s.peek() { + if next == '\r' && s.follow("\r\n") { + break + } + growingString += string(next) + s.next() + } + s.emitWithValue(tokenComment, growingString) + s.skip() + return previousState + } +} + +// lex the space after an equals sign in a function +func (s *sshLexer) lexRspace() sshLexStateFn { + for { + next := s.peek() + if !isSpace(next) { + break + } + s.skip() + } + return s.lexRvalue +} + +func (s *sshLexer) lexEquals() sshLexStateFn { + for { + next := s.peek() + if next == '=' { + s.emit(tokenEquals) + s.skip() + return s.lexRspace + } + // TODO error handling here; newline eof etc. 
+ if !isSpace(next) { + break + } + s.skip() + } + return s.lexRvalue +} + +func (s *sshLexer) lexKey() sshLexStateFn { + growingString := "" + + for r := s.peek(); isKeyChar(r); r = s.peek() { + // simplified a lot here + if isSpace(r) || r == '=' { + s.emitWithValue(tokenKey, growingString) + s.skip() + return s.lexEquals + } + growingString += string(r) + s.next() + } + s.emitWithValue(tokenKey, growingString) + return s.lexEquals +} + +func (s *sshLexer) lexRvalue() sshLexStateFn { + growingString := "" + for { + next := s.peek() + switch next { + case '\r': + if s.follow("\r\n") { + s.emitWithValue(tokenString, growingString) + s.skip() + return s.lexVoid + } + case '\n': + s.emitWithValue(tokenString, growingString) + s.skip() + return s.lexVoid + case '#': + s.emitWithValue(tokenString, growingString) + s.skip() + return s.lexComment(s.lexVoid) + case eof: + s.next() + } + if next == eof { + break + } + growingString += string(next) + s.next() + } + s.emit(tokenEOF) + return nil +} + +func (s *sshLexer) read() rune { + r := s.peek() + if r == '\n' { + s.endbufferLine++ + s.endbufferCol = 1 + } else { + s.endbufferCol++ + } + s.inputIdx++ + return r +} + +func (s *sshLexer) next() rune { + r := s.read() + + if r != eof { + s.buffer = append(s.buffer, r) + } + return r +} + +func (s *sshLexer) lexVoid() sshLexStateFn { + for { + next := s.peek() + switch next { + case '#': + s.skip() + return s.lexComment(s.lexVoid) + case '\r': + fallthrough + case '\n': + s.emit(tokenEmptyLine) + s.skip() + continue + } + + if isSpace(next) { + s.skip() + } + + if isKeyStartChar(next) { + return s.lexKey + } + + // removed IsKeyStartChar and lexKey. probably will need to readd + + if next == eof { + s.next() + break + } + } + + s.emit(tokenEOF) + return nil +} + +func (s *sshLexer) ignore() { + s.buffer = make([]rune, 0) + s.line = s.endbufferLine + s.col = s.endbufferCol +} + +func (s *sshLexer) skip() { + s.next() + s.ignore() +} + +func (s *sshLexer) emit(t tokenType) { + s.emitWithValue(t, string(s.buffer)) +} + +func (s *sshLexer) emitWithValue(t tokenType, value string) { + tok := token{ + Position: Position{s.line, s.col}, + typ: t, + val: value, + } + s.tokens <- tok + s.ignore() +} + +func (s *sshLexer) peek() rune { + if s.inputIdx >= len(s.input) { + return eof + } + + r := s.input[s.inputIdx] + return r +} + +func (s *sshLexer) follow(next string) bool { + inputIdx := s.inputIdx + for _, expectedRune := range next { + if inputIdx >= len(s.input) { + return false + } + r := s.input[inputIdx] + inputIdx++ + if expectedRune != r { + return false + } + } + return true +} + +func (s *sshLexer) run() { + for state := s.lexVoid; state != nil; { + state = state() + } + close(s.tokens) +} + +func lexSSH(input []byte) chan token { + runes := bytes.Runes(input) + l := &sshLexer{ + input: runes, + tokens: make(chan token), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + go l.run() + return l.tokens +} diff --git a/vendor/github.com/kevinburke/ssh_config/parser.go b/vendor/github.com/kevinburke/ssh_config/parser.go new file mode 100644 index 000000000..2b1e718cb --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/parser.go @@ -0,0 +1,200 @@ +package ssh_config + +import ( + "fmt" + "strings" + "unicode" +) + +type sshParser struct { + flow chan token + config *Config + tokensBuffer []token + currentTable []string + seenTableKeys []string + // /etc/ssh parser or local parser - used to find the default for relative + // filepaths in the Include directive + system bool + depth 
uint8 +} + +type sshParserStateFn func() sshParserStateFn + +// Formats and panics an error message based on a token +func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) { + // TODO this format is ugly + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *sshParser) raiseError(tok *token, err error) { + if err == ErrDepthExceeded { + panic(err) + } + // TODO this format is ugly + panic(tok.Position.String() + ": " + err.Error()) +} + +func (p *sshParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *sshParser) peek() *token { + if len(p.tokensBuffer) != 0 { + return &(p.tokensBuffer[0]) + } + + tok, ok := <-p.flow + if !ok { + return nil + } + p.tokensBuffer = append(p.tokensBuffer, tok) + return &tok +} + +func (p *sshParser) getToken() *token { + if len(p.tokensBuffer) != 0 { + tok := p.tokensBuffer[0] + p.tokensBuffer = p.tokensBuffer[1:] + return &tok + } + tok, ok := <-p.flow + if !ok { + return nil + } + return &tok +} + +func (p *sshParser) parseStart() sshParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenComment, tokenEmptyLine: + return p.parseComment + case tokenKey: + return p.parseKV + case tokenEOF: + return nil + default: + p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok)) + } + return nil +} + +func (p *sshParser) parseKV() sshParserStateFn { + key := p.getToken() + hasEquals := false + val := p.getToken() + if val.typ == tokenEquals { + hasEquals = true + val = p.getToken() + } + comment := "" + tok := p.peek() + if tok == nil { + tok = &token{typ: tokenEOF} + } + if tok.typ == tokenComment && tok.Position.Line == val.Position.Line { + tok = p.getToken() + comment = tok.val + } + if strings.ToLower(key.val) == "match" { + // https://github.com/kevinburke/ssh_config/issues/6 + p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported") + return nil + } + if strings.ToLower(key.val) == "host" { + strPatterns := strings.Split(val.val, " ") + patterns := make([]*Pattern, 0) + for i := range strPatterns { + if strPatterns[i] == "" { + continue + } + pat, err := NewPattern(strPatterns[i]) + if err != nil { + p.raiseErrorf(val, "Invalid host pattern: %v", err) + return nil + } + patterns = append(patterns, pat) + } + // val.val at this point could be e.g. 
"example.com " + hostval := strings.TrimRightFunc(val.val, unicode.IsSpace) + spaceBeforeComment := val.val[len(hostval):] + val.val = hostval + p.config.Hosts = append(p.config.Hosts, &Host{ + Patterns: patterns, + Nodes: make([]Node, 0), + EOLComment: comment, + spaceBeforeComment: spaceBeforeComment, + hasEquals: hasEquals, + }) + return p.parseStart + } + lastHost := p.config.Hosts[len(p.config.Hosts)-1] + if strings.ToLower(key.val) == "include" { + inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1) + if err == ErrDepthExceeded { + p.raiseError(val, err) + return nil + } + if err != nil { + p.raiseErrorf(val, "Error parsing Include directive: %v", err) + return nil + } + lastHost.Nodes = append(lastHost.Nodes, inc) + return p.parseStart + } + shortval := strings.TrimRightFunc(val.val, unicode.IsSpace) + spaceAfterValue := val.val[len(shortval):] + kv := &KV{ + Key: key.val, + Value: shortval, + spaceAfterValue: spaceAfterValue, + Comment: comment, + hasEquals: hasEquals, + leadingSpace: key.Position.Col - 1, + position: key.Position, + } + lastHost.Nodes = append(lastHost.Nodes, kv) + return p.parseStart +} + +func (p *sshParser) parseComment() sshParserStateFn { + comment := p.getToken() + lastHost := p.config.Hosts[len(p.config.Hosts)-1] + lastHost.Nodes = append(lastHost.Nodes, &Empty{ + Comment: comment.val, + // account for the "#" as well + leadingSpace: comment.Position.Col - 2, + position: comment.Position, + }) + return p.parseStart +} + +func parseSSH(flow chan token, system bool, depth uint8) *Config { + // Ensure we consume tokens to completion even if parser exits early + defer func() { + for range flow { + } + }() + + result := newConfig() + result.position = Position{1, 1} + parser := &sshParser{ + flow: flow, + config: result, + tokensBuffer: make([]token, 0), + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + system: system, + depth: depth, + } + parser.run() + return result +} diff --git a/vendor/github.com/kevinburke/ssh_config/position.go b/vendor/github.com/kevinburke/ssh_config/position.go new file mode 100644 index 000000000..e0b5e3fb3 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/position.go @@ -0,0 +1,25 @@ +package ssh_config + +import "fmt" + +// Position of a document element within a SSH document. +// +// Line and Col are both 1-indexed positions for the element's line number and +// column number, respectively. Values of zero or less will cause Invalid(), +// to return true. +type Position struct { + Line int // line within the document + Col int // column within the line +} + +// String representation of the position. +// Displays 1-indexed line and column numbers. +func (p Position) String() string { + return fmt.Sprintf("(%d, %d)", p.Line, p.Col) +} + +// Invalid returns whether or not the position is valid (i.e. 
with negative or +// null values) +func (p Position) Invalid() bool { + return p.Line <= 0 || p.Col <= 0 +} diff --git a/vendor/github.com/kevinburke/ssh_config/token.go b/vendor/github.com/kevinburke/ssh_config/token.go new file mode 100644 index 000000000..a0ecbb2bb --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/token.go @@ -0,0 +1,49 @@ +package ssh_config + +import "fmt" + +type token struct { + Position + typ tokenType + val string +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + } + return fmt.Sprintf("%q", t.val) +} + +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenEmptyLine + tokenComment + tokenKey + tokenEquals + tokenString +) + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof) +} + +// I'm not sure that this is correct +func isKeyChar(r rune) bool { + // Keys start with the first character that isn't whitespace or [ and end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} diff --git a/vendor/github.com/kevinburke/ssh_config/validators.go b/vendor/github.com/kevinburke/ssh_config/validators.go new file mode 100644 index 000000000..5977f9096 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/validators.go @@ -0,0 +1,186 @@ +package ssh_config + +import ( + "fmt" + "strconv" + "strings" +) + +// Default returns the default value for the given keyword, for example "22" if +// the keyword is "Port". Default returns the empty string if the keyword has no +// default, or if the keyword is unknown. Keyword matching is case-insensitive. +// +// Default values are provided by OpenSSH_7.4p1 on a Mac. +func Default(keyword string) string { + return defaults[strings.ToLower(keyword)] +} + +// Arguments where the value must be "yes" or "no" and *only* yes or no. 
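(The yes/no table introduced by the comment above continues immediately after this aside.) As a usage note for the exported half of this file: Default does a case-insensitive lookup in the defaults table further below. A minimal sketch, assuming nothing beyond the vendored github.com/kevinburke/ssh_config package:

```go
package main

import (
	"fmt"

	"github.com/kevinburke/ssh_config"
)

func main() {
	// Lookups are case-insensitive; both hit the same defaults entry.
	fmt.Println(ssh_config.Default("Port")) // "22"
	fmt.Println(ssh_config.Default("port")) // "22"

	// Unknown keywords (or keywords without a default) yield "".
	fmt.Println(ssh_config.Default("NoSuchKeyword")) // ""
}
```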
+var yesnos = map[string]bool{ + strings.ToLower("BatchMode"): true, + strings.ToLower("CanonicalizeFallbackLocal"): true, + strings.ToLower("ChallengeResponseAuthentication"): true, + strings.ToLower("CheckHostIP"): true, + strings.ToLower("ClearAllForwardings"): true, + strings.ToLower("Compression"): true, + strings.ToLower("EnableSSHKeysign"): true, + strings.ToLower("ExitOnForwardFailure"): true, + strings.ToLower("ForwardAgent"): true, + strings.ToLower("ForwardX11"): true, + strings.ToLower("ForwardX11Trusted"): true, + strings.ToLower("GatewayPorts"): true, + strings.ToLower("GSSAPIAuthentication"): true, + strings.ToLower("GSSAPIDelegateCredentials"): true, + strings.ToLower("HostbasedAuthentication"): true, + strings.ToLower("IdentitiesOnly"): true, + strings.ToLower("KbdInteractiveAuthentication"): true, + strings.ToLower("NoHostAuthenticationForLocalhost"): true, + strings.ToLower("PasswordAuthentication"): true, + strings.ToLower("PermitLocalCommand"): true, + strings.ToLower("PubkeyAuthentication"): true, + strings.ToLower("RhostsRSAAuthentication"): true, + strings.ToLower("RSAAuthentication"): true, + strings.ToLower("StreamLocalBindUnlink"): true, + strings.ToLower("TCPKeepAlive"): true, + strings.ToLower("UseKeychain"): true, + strings.ToLower("UsePrivilegedPort"): true, + strings.ToLower("VisualHostKey"): true, +} + +var uints = map[string]bool{ + strings.ToLower("CanonicalizeMaxDots"): true, + strings.ToLower("CompressionLevel"): true, // 1 to 9 + strings.ToLower("ConnectionAttempts"): true, + strings.ToLower("ConnectTimeout"): true, + strings.ToLower("NumberOfPasswordPrompts"): true, + strings.ToLower("Port"): true, + strings.ToLower("ServerAliveCountMax"): true, + strings.ToLower("ServerAliveInterval"): true, +} + +func mustBeYesOrNo(lkey string) bool { + return yesnos[lkey] +} + +func mustBeUint(lkey string) bool { + return uints[lkey] +} + +func validate(key, val string) error { + lkey := strings.ToLower(key) + if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") { + return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val) + } + if mustBeUint(lkey) { + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return fmt.Errorf("ssh_config: %v", err) + } + } + return nil +} + +var defaults = map[string]string{ + strings.ToLower("AddKeysToAgent"): "no", + strings.ToLower("AddressFamily"): "any", + strings.ToLower("BatchMode"): "no", + strings.ToLower("CanonicalizeFallbackLocal"): "yes", + strings.ToLower("CanonicalizeHostname"): "no", + strings.ToLower("CanonicalizeMaxDots"): "1", + strings.ToLower("ChallengeResponseAuthentication"): "yes", + strings.ToLower("CheckHostIP"): "yes", + // TODO is this still the correct cipher + strings.ToLower("Cipher"): "3des", + strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc", + strings.ToLower("ClearAllForwardings"): "no", + strings.ToLower("Compression"): "no", + strings.ToLower("CompressionLevel"): "6", + strings.ToLower("ConnectionAttempts"): "1", + strings.ToLower("ControlMaster"): "no", + strings.ToLower("EnableSSHKeysign"): "no", + strings.ToLower("EscapeChar"): "~", + strings.ToLower("ExitOnForwardFailure"): "no", + strings.ToLower("FingerprintHash"): "sha256", + strings.ToLower("ForwardAgent"): "no", + strings.ToLower("ForwardX11"): "no", + strings.ToLower("ForwardX11Timeout"): "20m", + strings.ToLower("ForwardX11Trusted"): "no", + strings.ToLower("GatewayPorts"): 
"no", + strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2", + strings.ToLower("GSSAPIAuthentication"): "no", + strings.ToLower("GSSAPIDelegateCredentials"): "no", + strings.ToLower("HashKnownHosts"): "no", + strings.ToLower("HostbasedAuthentication"): "no", + + strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", + strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", + // HostName has a dynamic default (the value passed at the command line). + + strings.ToLower("IdentitiesOnly"): "no", + strings.ToLower("IdentityFile"): "~/.ssh/identity", + + // IPQoS has a dynamic default based on interactive or non-interactive + // sessions. + + strings.ToLower("KbdInteractiveAuthentication"): "yes", + + strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1", + strings.ToLower("LogLevel"): "INFO", + strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1", + + strings.ToLower("NoHostAuthenticationForLocalhost"): "no", + strings.ToLower("NumberOfPasswordPrompts"): "3", + strings.ToLower("PasswordAuthentication"): "yes", + strings.ToLower("PermitLocalCommand"): "no", + strings.ToLower("Port"): "22", + + strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password", + strings.ToLower("Protocol"): "2", + strings.ToLower("ProxyUseFdpass"): "no", + strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", + strings.ToLower("PubkeyAuthentication"): "yes", + strings.ToLower("RekeyLimit"): "default none", + strings.ToLower("RhostsRSAAuthentication"): "no", + strings.ToLower("RSAAuthentication"): "yes", + + strings.ToLower("ServerAliveCountMax"): "3", + strings.ToLower("ServerAliveInterval"): "0", + strings.ToLower("StreamLocalBindMask"): "0177", + strings.ToLower("StreamLocalBindUnlink"): "no", + strings.ToLower("StrictHostKeyChecking"): "ask", + strings.ToLower("TCPKeepAlive"): "yes", + strings.ToLower("Tunnel"): "no", + strings.ToLower("TunnelDevice"): "any:any", + strings.ToLower("UpdateHostKeys"): "no", + strings.ToLower("UseKeychain"): "no", + strings.ToLower("UsePrivilegedPort"): "no", + + strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2", + strings.ToLower("VerifyHostKeyDNS"): "no", + strings.ToLower("VisualHostKey"): "no", + strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth", +} + +// these identities are used for SSH protocol 2 +var defaultProtocol2Identities = []string{ 
+ "~/.ssh/id_dsa", + "~/.ssh/id_ecdsa", + "~/.ssh/id_ed25519", + "~/.ssh/id_rsa", +} + +// these directives support multiple items that can be collected +// across multiple files +var pluralDirectives = map[string]bool{ + "CertificateFile": true, + "IdentityFile": true, + "DynamicForward": true, + "RemoteForward": true, + "SendEnv": true, + "SetEnv": true, +} + +// SupportsMultiple reports whether a directive can be specified multiple times. +func SupportsMultiple(key string) bool { + return pluralDirectives[strings.ToLower(key)] +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a22953805..4528059ca 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,5 +1,5 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh @@ -99,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 05c7359e4..de264c85a 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,27 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -81,7 +102,7 @@ 
https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -136,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -339,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. 
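The "flate: read data written with partial flush" changelog entry above corresponds to the new functional-options constructor added in the inflate.go hunk below (NewReaderOpts, WithPartialBlock, WithDict). A minimal sketch of that API; the standard library writer is used here only to produce input, and a real partial-flush producer (e.g. another zlib implementation) is assumed for the option to matter in practice:

```go
package main

import (
	"bytes"
	"compress/flate" // stdlib writer, used only to generate test data
	"fmt"
	"io"
	"log"

	kflate "github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w, err := flate.NewWriter(&buf, flate.DefaultCompression)
	if err != nil {
		log.Fatal(err)
	}
	w.Write([]byte("hello, flate"))
	w.Close()

	// WithPartialBlock tells the decompressor to return data after each
	// block instead of waiting for a sync flush, so streams produced with
	// partial flushes can be consumed incrementally.
	r := kflate.NewReaderOpts(&buf, kflate.WithPartialBlock())
	defer r.Close()

	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```

As the inflate.go hunk below also shows, NewReaderDict is now just a thin wrapper over NewReaderOpts(r, WithDict(dict)).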
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 66d1657d2..af53fb860 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) { } switch d.compressionLevel.chain { case 0: - // level was NoCompression or ConstantCompresssion. + // level was NoCompression or ConstantCompression. d.windowEnd = 0 default: s := d.state diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 2f410d64f..0d7b437f1 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -298,6 +298,14 @@ const ( huffmanGenericReader ) +// flushMode tells decompressor when to return data +type flushMode uint8 + +const ( + syncFlush flushMode = iota // return data after sync flush block + partialFlush // return data after each block +) + // Decompress state. type decompressor struct { // Input source. @@ -332,6 +340,8 @@ type decompressor struct { nb uint final bool + + flushMode flushMode } func (f *decompressor) nextBlock() { @@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() { } if n == 0 { - f.toRead = f.dict.readFlush() + if f.flushMode == syncFlush { + f.toRead = f.dict.readFlush() + } + f.finishBlock() return } @@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() { if f.dict.availRead() > 0 { f.toRead = f.dict.readFlush() } + f.err = io.EOF + } else if f.flushMode == partialFlush && f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() } + f.step = nextBlock } @@ -789,15 +806,25 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { return nil } -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { +type ReaderOpt func(*decompressor) + +// WithPartialBlock tells decompressor to return after each block, +// so it can read data written with partial flush +func WithPartialBlock() ReaderOpt { + return func(f *decompressor) { + f.flushMode = partialFlush + } +} + +// WithDict initializes the reader with a preset dictionary +func WithDict(dict []byte) ReaderOpt { + return func(f *decompressor) { + f.dict.init(maxMatchOffset, dict) + } +} + +// NewReaderOpts returns new reader with provided options +func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser { fixedHuffmanDecoderInit() var f decompressor @@ -806,9 +833,26 @@ func NewReader(r io.Reader) io.ReadCloser { f.codebits = new([numCodes]int) f.step = nextBlock f.dict.init(maxMatchOffset, nil) + + for _, opt := range opts { + opt(&f) + } + return &f } +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. 
+func NewReader(r io.Reader) io.ReadCloser { + return NewReaderOpts(r) +} + // NewReaderDict is like NewReader but initializes the reader // with a preset dictionary. The returned Reader behaves as if // the uncompressed data stream started with the given dictionary, @@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser { // // The ReadCloser returned by NewReader also implements Resetter. func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, dict) - return &f + return NewReaderOpts(r, WithDict(dict)) } diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7e..0c7dd4ffe 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b25..0f56b02d7 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fbc7..9c28840c3 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf91f..84a79fde7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
@@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -270,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -708,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -738,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -772,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f7..d36be7bd8 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. 
var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0f..8f8223cd3 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. 
- // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7e..e47af66e7 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd828..c59f17e07 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174b8..f5591fa1e 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc736..066bef2a4 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been called. 
ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index 64df4a8d8..c01f551ab 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -157,58 +157,44 @@ type ValidationRecord struct { UsedRSAKEX bool `json:"-"` } -func looksLikeKeyAuthorization(str string) error { - parts := strings.Split(str, ".") - if len(parts) != 2 { - return fmt.Errorf("Invalid key authorization: does not look like a key authorization") - } else if !LooksLikeAToken(parts[0]) { - return fmt.Errorf("Invalid key authorization: malformed token") - } else if !LooksLikeAToken(parts[1]) { - // Thumbprints have the same syntax as tokens in boulder - // Both are base64-encoded and 32 octets - return fmt.Errorf("Invalid key authorization: malformed key thumbprint") - } - return nil -} - // Challenge is an aggregate of all data needed for any challenges. // // Rather than define individual types for different types of // challenge, we just throw all the elements into one bucket, // together with the common metadata elements. type Challenge struct { - // The type of challenge + // Type is the type of challenge encoded in this object. Type AcmeChallenge `json:"type"` - // The status of this challenge - Status AcmeStatus `json:"status,omitempty"` + // URL is the URL to which a response can be posted. Required for all types. + URL string `json:"url,omitempty"` - // Contains the error that occurred during challenge validation, if any - Error *probs.ProblemDetails `json:"error,omitempty"` + // Status is the status of this challenge. Required for all types. + Status AcmeStatus `json:"status,omitempty"` - // A URI to which a response can be POSTed - URI string `json:"uri,omitempty"` + // Validated is the time at which the server validated the challenge. Required + // if status is valid. + Validated *time.Time `json:"validated,omitempty"` - // For the V2 API the "URI" field is deprecated in favour of URL. - URL string `json:"url,omitempty"` + // Error contains the error that occurred during challenge validation, if any. + // If set, the Status must be "invalid". + Error *probs.ProblemDetails `json:"error,omitempty"` - // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges + // Token is a random value that uniquely identifies the challenge. It is used + // by all current challenges (http-01, tls-alpn-01, and dns-01). Token string `json:"token,omitempty"` - // The expected KeyAuthorization for validation of the challenge. Populated by - // the RA prior to passing the challenge to the VA. For legacy reasons this - // field is called "ProvidedKeyAuthorization" because it was initially set by - // the content of the challenge update POST from the client. It is no longer - // set that way and should be renamed to "KeyAuthorization". - // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`. + // ProvidedKeyAuthorization used to carry the expected key authorization from + // the RA to the VA. 
However, since this field is never presented to the user + // via the ACME API, it should not be on this type. + // + // Deprecated: use vapb.PerformValidationRequest.ExpectedKeyAuthorization instead. + // TODO(#7514): Remove this. ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"` // Contains information about URLs used or redirected to and IPs resolved and // used ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"` - // The time at which the server validated the challenge. Required by - // RFC8555 if status is valid. - Validated *time.Time `json:"validated,omitempty"` } // ExpectedKeyAuthorization computes the expected KeyAuthorization value for @@ -273,43 +259,18 @@ func (ch Challenge) RecordsSane() bool { return true } -// CheckConsistencyForClientOffer checks the fields of a challenge object before it is -// given to the client. -func (ch Challenge) CheckConsistencyForClientOffer() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // Before completion, the key authorization field should be empty - if ch.ProvidedKeyAuthorization != "" { - return fmt.Errorf("A response to this challenge was already submitted.") - } - return nil -} - -// CheckConsistencyForValidation checks the fields of a challenge object before it is -// given to the VA. -func (ch Challenge) CheckConsistencyForValidation() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // If the challenge is completed, then there should be a key authorization - return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization) -} - -// checkConsistency checks the sanity of a challenge object before issued to the client. -func (ch Challenge) checkConsistency() error { +// CheckPending ensures that a challenge object is pending and has a token. +// This is used before offering the challenge to the client, and before actually +// validating a challenge. +func (ch Challenge) CheckPending() error { if ch.Status != StatusPending { - return fmt.Errorf("The challenge is not pending.") + return fmt.Errorf("challenge is not pending") } - // There always needs to be a token - if !LooksLikeAToken(ch.Token) { - return fmt.Errorf("The token is missing.") + if !looksLikeAToken(ch.Token) { + return fmt.Errorf("token is missing or malformed") } + return nil } diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index 31f6d2fcf..641521f16 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -76,9 +76,9 @@ func NewToken() string { var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`) -// LooksLikeAToken checks whether a string represents a 32-octet value in +// looksLikeAToken checks whether a string represents a 32-octet value in // the URL-safe base64 alphabet. -func LooksLikeAToken(token string) bool { +func looksLikeAToken(token string) bool { return tokenFormat.MatchString(token) } diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go index 087a01812..04a075d35 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go @@ -39,6 +39,9 @@ var ( ) type Config struct { + // AllowedKeys enables or disables specific key algorithms and sizes. If + // nil, defaults to just those keys allowed by the Let's Encrypt CPS. 
+ AllowedKeys *AllowedKeys // WeakKeyFile is the path to a JSON file containing truncated modulus hashes // of known weak RSA keys. If this config value is empty, then RSA modulus // hash checking will be disabled. @@ -54,6 +57,40 @@ type Config struct { FermatRounds int } +// AllowedKeys is a map of six specific key algorithm and size combinations to +// booleans indicating whether keys of that type are considered good. +type AllowedKeys struct { + // Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple + // of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes + // Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which + // have a known method to easily compute their private key, such as Debian Weak + // Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at + // common key sizes, so we restrict all issuance to those common key sizes. + RSA2048 bool + RSA3072 bool + RSA4096 bool + // Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid + // points on the NIST P-256, P-384, or P-521 elliptic curves. + ECDSAP256 bool + ECDSAP384 bool + ECDSAP521 bool +} + +// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's +// Encrypt CPS CV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3076, RSA +// 4096, ECDSA 256 and ECDSA P384. +// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate +// If this is ever changed, the CP/CPS MUST be changed first. +func LetsEncryptCPS() AllowedKeys { + return AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + } +} + // ErrBadKey represents an error with a key. It is distinct from the various // ways in which an ACME request can have an erroneous key (BadPublicKeyError, // BadCSRError) because this library is used to check both JWS signing keys and @@ -74,28 +111,29 @@ type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error) // KeyPolicy determines which types of key may be used with various boulder // operations. type KeyPolicy struct { - AllowRSA bool // Whether RSA keys should be allowed. - AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed. - AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed. - weakRSAList *WeakRSAKeys - blockedList *blockedKeys - fermatRounds int - blockedCheck BlockedKeyCheckFunc + allowedKeys AllowedKeys + weakRSAList *WeakRSAKeys + blockedList *blockedKeys + fermatRounds int + blockedCheck BlockedKeyCheckFunc } -// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384. -// weakKeyFile contains the path to a JSON file containing truncated modulus -// hashes of known weak RSA keys. If this argument is empty RSA modulus hash -// checking will be disabled. blockedKeyFile contains the path to a YAML file -// containing Base64 encoded SHA256 hashes of pkix subject public keys that -// should be blocked. If this argument is empty then no blocked key checking is -// performed. -func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { +// NewPolicy returns a key policy based on the given configuration, with sane +// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys +// is used. If the config's WeakKeyFile or BlockedKeyFile paths are empty, those +// checks are disabled. If the config's FermatRounds is 0, Fermat Factorization +// is disabled. 
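This bump replaces boulder's NewKeyPolicy (and its AllowRSA/AllowECDSANISTP* booleans) with the AllowedKeys table above. A hedged sketch of the new constructor; GoodKey is assumed from the package's existing public surface and is not part of this hunk:

```go
package main

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/letsencrypt/boulder/goodkey"
)

func main() {
	// A nil *Config selects the LetsEncryptCPS() defaults shown above
	// (RSA 2048/3072/4096, ECDSA P-256/P-384) and disables the weak-key,
	// blocked-key, and Fermat-factorization checks.
	policy, err := goodkey.NewPolicy(nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// P-256 is allowed by the defaults; P-521 would be rejected because
	// LetsEncryptCPS() leaves ECDSAP521 false. (GoodKey assumed, see above.)
	if err := policy.GoodKey(context.Background(), key.Public()); err != nil {
		log.Fatal(err)
	}
	log.Println("key accepted")
}
```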
+func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { + if config == nil { + config = &Config{} + } kp := KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, - blockedCheck: bkc, + blockedCheck: bkc, + } + if config.AllowedKeys == nil { + kp.allowedKeys = LetsEncryptCPS() + } else { + kp.allowedKeys = *config.AllowedKeys } if config.WeakKeyFile != "" { keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile) @@ -264,44 +302,30 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) { // Simply use a whitelist for now. params := c.Params() switch { - case policy.AllowECDSANISTP256 && params == elliptic.P256().Params(): + case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params(): + return nil + case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params(): return nil - case policy.AllowECDSANISTP384 && params == elliptic.P384().Params(): + case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params(): return nil default: return badKey("ECDSA curve %v not allowed", params.Name) } } -// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple -// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes -// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which -// have a known method to easily compute their private key, such as Debian Weak -// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at -// common key sizes, so we restrict all issuance to those common key sizes. -var acceptableRSAKeySizes = map[int]bool{ - 2048: true, - 3072: true, - 4096: true, -} - // GoodKeyRSA determines if a RSA pubkey meets our requirements -func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { - if !policy.AllowRSA { - return badKey("RSA keys are not allowed") +func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error { + modulus := key.N + + err := policy.goodRSABitLen(key) + if err != nil { + return err } + if policy.weakRSAList != nil && policy.weakRSAList.Known(key) { return badKey("key is on a known weak RSA key list") } - modulus := key.N - - // See comment on acceptableRSAKeySizes above. - modulusBitLen := modulus.BitLen() - if !acceptableRSAKeySizes[modulusBitLen] { - return badKey("key size not supported: %d", modulusBitLen) - } - // Rather than support arbitrary exponents, which significantly increases // the size of the key space we allow, we restrict E to the defacto standard // RSA exponent 65537. There is no specific standards document that specifies @@ -341,6 +365,21 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { return nil } +func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error { + // See comment on AllowedKeys above. + modulusBitLen := key.N.BitLen() + switch { + case modulusBitLen == 2048 && policy.allowedKeys.RSA2048: + return nil + case modulusBitLen == 3072 && policy.allowedKeys.RSA3072: + return nil + case modulusBitLen == 4096 && policy.allowedKeys.RSA4096: + return nil + default: + return badKey("key size not supported: %d", modulusBitLen) + } +} + // Returns true iff integer i is divisible by any of the primes in smallPrimes. // // Short circuits; execution time is dependent on i. 
Do not use this on secret @@ -400,7 +439,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error { b2 := new(big.Int) b2.Mul(a, a).Sub(b2, n) - for i := 0; i < rounds; i++ { + for range rounds { // To see if b2 is a perfect square, we take its square root, square that, // and check to see if we got the same result back. bb.Sqrt(b2).Mul(bb, bb) diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 53d7560ec..52ee2a3dc 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.45.1. By combining all the individual C code files into this +** version 3.46.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -19,7 +19,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** e876e51a0ed5c5b3126f52e532044363a014. +** c9c2ab54ba1f5f46360f1b4f35d849cd3f08. */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 @@ -460,9 +460,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.45.1" -#define SQLITE_VERSION_NUMBER 3045001 -#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a" +#define SQLITE_VERSION "3.46.1" +#define SQLITE_VERSION_NUMBER 3046001 +#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -734,6 +734,8 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. **
  • The application must not modify the SQL statement text passed into ** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. +**
  • The application must not dereference the arrays or string pointers +** passed as the 3rd and 4th callback parameters after it returns. ** */ SQLITE_API int sqlite3_exec( @@ -1076,11 +1078,11 @@ struct sqlite3_file { ** ** xLock() upgrades the database file lock. In other words, xLock() moves the ** database file lock in the direction NONE toward EXCLUSIVE. The argument to -** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never ** SQLITE_LOCK_NONE. If the database file lock is already at or above the ** requested lock, then the call to xLock() is a no-op. ** xUnlock() downgrades the database file lock to either SHARED or NONE. -* If the lock is already at or below the requested lock state, then the call +** If the lock is already at or below the requested lock state, then the call ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, @@ -2455,6 +2457,22 @@ struct sqlite3_mem_methods { ** configuration setting is never used, then the default maximum is determined ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that ** compile-time option is not set, then the default maximum is 1073741824. +** +** [[SQLITE_CONFIG_ROWID_IN_VIEW]] +**
    SQLITE_CONFIG_ROWID_IN_VIEW +**
    The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability +** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is +** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability +** defaults to on. This configuration option queries the current setting or +** changes the setting to off or on. The argument is a pointer to an integer. +** If that integer initially holds a value of 1, then the ability for VIEWs to +** have ROWIDs is activated. If the integer initially holds zero, then the +** ability is deactivated. Any other initial value for the integer leaves the +** setting unchanged. After changes, if any, the integer is written with +** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite +** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and +** recommended case) then the integer is always filled with zero, regardless +** if its initial value. ** */ #define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ @@ -2486,6 +2504,7 @@ struct sqlite3_mem_methods { #define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */ #define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */ #define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */ +#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */ /* ** CAPI3REF: Database Connection Configuration Options @@ -3600,8 +3619,8 @@ SQLITE_API int sqlite3_set_authorizer( #define SQLITE_RECURSIVE 33 /* NULL NULL */ /* -** CAPI3REF: Tracing And Profiling Functions -** METHOD: sqlite3 +** CAPI3REF: Deprecated Tracing And Profiling Functions +** DEPRECATED ** ** These routines are deprecated. Use the [sqlite3_trace_v2()] interface ** instead of the routines described here. @@ -7182,6 +7201,12 @@ SQLITE_API int sqlite3_autovacuum_pages( ** The exceptions defined in this paragraph might change in a future ** release of SQLite. ** +** Whether the update hook is invoked before or after the +** corresponding change is currently unspecified and may differ +** depending on the type of change. Do not rely on the order of the +** hook call with regards to the final result of the operation which +** triggers the hook. +** ** The update hook implementation must not do anything that will modify ** the database connection that invoked the update hook. Any actions ** to modify the database connection must be deferred until after the @@ -8652,7 +8677,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** The sqlite3_keyword_count() interface returns the number of distinct ** keywords understood by SQLite. ** -** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and +** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and ** makes *Z point to that keyword expressed as UTF8 and writes the number ** of bytes in the keyword into *L. The string that *Z points to is not ** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns @@ -10231,24 +10256,45 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **
  • ** ^(If the sqlite3_vtab_distinct() interface returns 2, that means ** that the query planner does not need the rows returned in any particular -** order, as long as rows with the same values in all "aOrderBy" columns -** are adjacent.)^ ^(Furthermore, only a single row for each particular -** combination of values in the columns identified by the "aOrderBy" field -** needs to be returned.)^ ^It is always ok for two or more rows with the same -** values in all "aOrderBy" columns to be returned, as long as all such rows -** are adjacent. ^The virtual table may, if it chooses, omit extra rows -** that have the same value for all columns identified by "aOrderBy". -** ^However omitting the extra rows is optional. +** order, as long as rows with the same values in all columns identified +** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows +** contain the same values for all columns identified by "colUsed", all but +** one such row may optionally be omitted from the result.)^ +** The virtual table is not required to omit rows that are duplicates +** over the "colUsed" columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. ** This mode is used for a DISTINCT query. **
  • -** ^(If the sqlite3_vtab_distinct() interface returns 3, that means -** that the query planner needs only distinct rows but it does need the -** rows to be sorted.)^ ^The virtual table implementation is free to omit -** rows that are identical in all aOrderBy columns, if it wants to, but -** it is not required to omit any rows. This mode is used for queries +** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the +** virtual table must return rows in the order defined by "aOrderBy" as +** if the sqlite3_vtab_distinct() interface had returned 0. However if +** two or more rows in the result have the same values for all columns +** identified by "colUsed", then all but one such row may optionally be +** omitted.)^ Like when the return value is 2, the virtual table +** is not required to omit rows that are duplicates over the "colUsed" +** columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. +** This mode is used for queries ** that have both DISTINCT and ORDER BY clauses. ** ** +**
The following table summarizes the conditions under which the +** virtual table is allowed to set the "orderByConsumed" flag based on +** the value returned by sqlite3_vtab_distinct(). This table is a +** restatement of the previous four paragraphs: +** +** <table border=1> +**
<tr> +** <td>sqlite3_vtab_distinct() return value +** <td>Rows are returned in aOrderBy order +** <td>Rows with the same value in all aOrderBy columns are adjacent +** <td>Duplicates over all colUsed columns may be omitted +**
<tr><td>0<td>yes<td>yes<td>no +**
<tr><td>1<td>no<td>yes<td>no +**
<tr><td>2<td>no<td>yes<td>yes +**
<tr><td>3<td>yes<td>yes<td>yes +** </table>
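
To make the table concrete, here is a minimal sketch (plain C, not part of the upstream diff) of how an xBestIndex implementation might act on the four modes. The canSort() and canGroup() helpers are hypothetical stand-ins for whatever a real virtual table knows about its own output order; only sqlite3_vtab_distinct() and the orderByConsumed flag come from the documented API.

    #include "sqlite3.h"

    /* Hypothetical helpers standing in for knowledge a real virtual
    ** table has about its own output:
    **   canSort(pInfo)  - rows will come out in full aOrderBy order
    **   canGroup(pInfo) - rows with equal aOrderBy keys come out adjacent */
    static int canSort(sqlite3_index_info *pInfo){ (void)pInfo; return 0; }
    static int canGroup(sqlite3_index_info *pInfo){ (void)pInfo; return 0; }

    static int xBestIndexSketch(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
      (void)pVTab;
      switch( sqlite3_vtab_distinct(pInfo) ){
        case 0:   /* plain ORDER BY: output must be fully sorted */
        case 3:   /* DISTINCT + ORDER BY: fully sorted; omitting duplicates
                  ** over the colUsed columns is permitted, never required */
          if( canSort(pInfo) ) pInfo->orderByConsumed = 1;
          break;
        case 1:   /* GROUP BY: adjacency of equal keys is enough */
        case 2:   /* DISTINCT: adjacency is enough; dedup stays optional */
          if( canSort(pInfo) || canGroup(pInfo) ) pInfo->orderByConsumed = 1;
          break;
      }
      return SQLITE_OK;
    }

Setting orderByConsumed in modes 2 and 3 without actually deduplicating is safe, because the table above never requires duplicates to be removed.
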
    +** ** ^For the purposes of comparing virtual table output values to see if the ** values are same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" @@ -12293,6 +12339,30 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); +/* +** CAPI3REF: Add A Single Change To A Changegroup +** METHOD: sqlite3_changegroup +** +** This function adds the single change currently indicated by the iterator +** passed as the second argument to the changegroup object. The rules for +** adding the change are just as described for [sqlite3changegroup_add()]. +** +** If the change is successfully added to the changegroup, SQLITE_OK is +** returned. Otherwise, an SQLite error code is returned. +** +** The iterator must point to a valid entry when this function is called. +** If it does not, SQLITE_ERROR is returned and no change is added to the +** changegroup. Additionally, the iterator must not have been opened with +** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also +** returned. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup*, + sqlite3_changeset_iter* +); + + + /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup ** METHOD: sqlite3_changegroup @@ -13097,8 +13167,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken @@ -14296,6 +14366,8 @@ struct fts5_api { # define SQLITE_OMIT_ALTERTABLE #endif +#define SQLITE_DIGIT_SEPARATOR '_' + /* ** Return true (non-zero) if the input is an integer that is too large ** to fit in 32-bits. 
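
Stepping back to the new sqlite3changegroup_add_change() interface documented in the hunk above: a short usage sketch follows, assuming a build with SQLITE_ENABLE_SESSION. The filterChangeset() wrapper and the idea of filtering by table name are illustrative only; the session-API calls themselves are the documented ones.

    #include <string.h>
    #include "sqlite3.h"

    /* Copy only the changes that touch table zTab out of a raw changeset
    ** (pData/nData) into a freshly allocated changegroup. */
    static int filterChangeset(
      const char *zTab,
      int nData, void *pData,
      sqlite3_changegroup **ppOut
    ){
      sqlite3_changeset_iter *pIter = 0;
      sqlite3_changegroup *pGrp = 0;
      int rc = sqlite3changegroup_new(&pGrp);
      if( rc==SQLITE_OK ) rc = sqlite3changeset_start(&pIter, nData, pData);
      while( rc==SQLITE_OK && sqlite3changeset_next(pIter)==SQLITE_ROW ){
        const char *zThis;
        int nCol, op, bIndirect;
        sqlite3changeset_op(pIter, &zThis, &nCol, &op, &bIndirect);
        if( strcmp(zThis, zTab)==0 ){
          /* Iterator points at a valid entry and was not opened with
          ** SQLITE_CHANGESETAPPLY_INVERT, as the new interface requires */
          rc = sqlite3changegroup_add_change(pGrp, pIter);
        }
      }
      if( pIter ) sqlite3changeset_finalize(pIter);
      if( rc!=SQLITE_OK ){ sqlite3changegroup_delete(pGrp); pGrp = 0; }
      *ppOut = pGrp;
      return rc;
    }
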
This macro is used inside of various testcase() @@ -14588,8 +14660,8 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_TRUEFALSE 170 #define TK_ISNOT 171 #define TK_FUNCTION 172 -#define TK_UMINUS 173 -#define TK_UPLUS 174 +#define TK_UPLUS 173 +#define TK_UMINUS 174 #define TK_TRUTH 175 #define TK_REGISTER 176 #define TK_VECTOR 177 @@ -14598,8 +14670,9 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_ASTERISK 180 #define TK_SPAN 181 #define TK_ERROR 182 -#define TK_SPACE 183 -#define TK_ILLEGAL 184 +#define TK_QNUMBER 183 +#define TK_SPACE 184 +#define TK_ILLEGAL 185 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -14861,7 +14934,7 @@ typedef INT16_TYPE LogEst; # define SQLITE_PTRSIZE __SIZEOF_POINTER__ # elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(_M_ARM) || defined(__arm__) || defined(__x86) || \ - (defined(__APPLE__) && defined(__POWERPC__)) || \ + (defined(__APPLE__) && defined(__ppc__)) || \ (defined(__TOS_AIX__) && !defined(__64BIT__)) # define SQLITE_PTRSIZE 4 # else @@ -15098,6 +15171,7 @@ SQLITE_PRIVATE u32 sqlite3TreeTrace; ** 0x00010000 Beginning of DELETE/INSERT/UPDATE processing ** 0x00020000 Transform DISTINCT into GROUP BY ** 0x00040000 SELECT tree dump after all code has been generated +** 0x00080000 NOT NULL strength reduction */ /* @@ -15128,7 +15202,7 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace; ** 0x00000010 Display sqlite3_index_info xBestIndex calls ** 0x00000020 Range an equality scan metrics ** 0x00000040 IN operator decisions -** 0x00000080 WhereLoop cost adjustements +** 0x00000080 WhereLoop cost adjustments ** 0x00000100 ** 0x00000200 Covering index decisions ** 0x00000400 OR optimization @@ -16277,6 +16351,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( sqlite3 *db, /* Database connection that is running the check */ Btree *p, /* The btree to be checked */ Pgno *aRoot, /* An array of root pages numbers for individual trees */ + sqlite3_value *aCnt, /* OUT: entry counts for each btree in aRoot[] */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ int *pnErr, /* OUT: Write number of errors seen to this variable */ @@ -16547,12 +16622,12 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Vacuum 5 #define OP_VFilter 6 /* jump, synopsis: iplan=r[P3] zplan='P4' */ #define OP_VUpdate 7 /* synopsis: data=r[P3@P2] */ -#define OP_Init 8 /* jump, synopsis: Start at P2 */ +#define OP_Init 8 /* jump0, synopsis: Start at P2 */ #define OP_Goto 9 /* jump */ #define OP_Gosub 10 /* jump */ -#define OP_InitCoroutine 11 /* jump */ -#define OP_Yield 12 /* jump */ -#define OP_MustBeInt 13 /* jump */ +#define OP_InitCoroutine 11 /* jump0 */ +#define OP_Yield 12 /* jump0 */ +#define OP_MustBeInt 13 /* jump0 */ #define OP_Jump 14 /* jump */ #define OP_Once 15 /* jump */ #define OP_If 16 /* jump */ @@ -16560,22 +16635,22 @@ typedef struct VdbeOpList VdbeOpList; #define OP_IsType 18 /* jump, synopsis: if typeof(P1.P3) in P5 goto P2 */ #define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ #define OP_IfNullRow 20 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */ -#define OP_SeekLT 21 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekLE 22 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGE 23 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGT 24 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekLT 21 /* jump0, synopsis: key=r[P3@P4] */ +#define 
OP_SeekLE 22 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekGE 23 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekGT 24 /* jump0, synopsis: key=r[P3@P4] */ #define OP_IfNotOpen 25 /* jump, synopsis: if( !csr[P1] ) goto P2 */ #define OP_IfNoHope 26 /* jump, synopsis: key=r[P3@P4] */ #define OP_NoConflict 27 /* jump, synopsis: key=r[P3@P4] */ #define OP_NotFound 28 /* jump, synopsis: key=r[P3@P4] */ #define OP_Found 29 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekRowid 30 /* jump, synopsis: intkey=r[P3] */ +#define OP_SeekRowid 30 /* jump0, synopsis: intkey=r[P3] */ #define OP_NotExists 31 /* jump, synopsis: intkey=r[P3] */ -#define OP_Last 32 /* jump */ -#define OP_IfSmaller 33 /* jump */ +#define OP_Last 32 /* jump0 */ +#define OP_IfSizeBetween 33 /* jump */ #define OP_SorterSort 34 /* jump */ #define OP_Sort 35 /* jump */ -#define OP_Rewind 36 /* jump */ +#define OP_Rewind 36 /* jump0 */ #define OP_SorterNext 37 /* jump */ #define OP_Prev 38 /* jump */ #define OP_Next 39 /* jump */ @@ -16587,7 +16662,7 @@ typedef struct VdbeOpList VdbeOpList; #define OP_IdxGE 45 /* jump, synopsis: key=r[P3@P4] */ #define OP_RowSetRead 46 /* jump, synopsis: r[P3]=rowset(P1) */ #define OP_RowSetTest 47 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 48 /* jump */ +#define OP_Program 48 /* jump0 */ #define OP_FkIfZero 49 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ #define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ #define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ @@ -16617,7 +16692,7 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Null 75 /* synopsis: r[P2..P3]=NULL */ #define OP_SoftNull 76 /* synopsis: r[P1]=NULL */ #define OP_Blob 77 /* synopsis: r[P2]=P4 (len=P1) */ -#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1,P4) */ +#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1) */ #define OP_Move 79 /* synopsis: r[P2@P3]=r[P1@P3] */ #define OP_Copy 80 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ #define OP_SCopy 81 /* synopsis: r[P2]=r[P1] */ @@ -16741,14 +16816,15 @@ typedef struct VdbeOpList VdbeOpList; #define OPFLG_OUT2 0x10 /* out2: P2 is an output */ #define OPFLG_OUT3 0x20 /* out3: P3 is an output */ #define OPFLG_NCYCLE 0x40 /* ncycle:Cycles count against P1 */ +#define OPFLG_JUMP0 0x80 /* jump0: P2 might be zero */ #define OPFLG_INITIALIZER {\ /* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x41, 0x00,\ -/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\ -/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\ -/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\ -/* 32 */ 0x41, 0x01, 0x41, 0x41, 0x41, 0x01, 0x41, 0x41,\ +/* 8 */ 0x81, 0x01, 0x01, 0x81, 0x83, 0x83, 0x01, 0x01,\ +/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0xc9, 0xc9, 0xc9,\ +/* 24 */ 0xc9, 0x01, 0x49, 0x49, 0x49, 0x49, 0xc9, 0x49,\ +/* 32 */ 0xc1, 0x01, 0x41, 0x41, 0xc1, 0x01, 0x41, 0x41,\ /* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\ -/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ +/* 48 */ 0x81, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ /* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\ /* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ /* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\ @@ -16908,6 +16984,8 @@ SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*); SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *); SQLITE_PRIVATE int sqlite3VdbeHasSubProgram(Vdbe*); +SQLITE_PRIVATE void sqlite3MemSetArrayInt64(sqlite3_value *aMem, 
int iIdx, i64 val); + SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context*); #ifdef SQLITE_ENABLE_BYTECODE_VTAB SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3*); @@ -17495,6 +17573,10 @@ struct FuncDefHash { }; #define SQLITE_FUNC_HASH(C,L) (((C)+(L))%SQLITE_FUNC_HASH_SZ) +#if defined(SQLITE_USER_AUTHENTICATION) +# warning "The SQLITE_USER_AUTHENTICATION extension is deprecated. \ + See ext/userauth/user-auth.txt for details." +#endif #ifdef SQLITE_USER_AUTHENTICATION /* ** Information held in the "sqlite3" database connection object and used @@ -17798,7 +17880,7 @@ struct sqlite3 { #define SQLITE_CursorHints 0x00000400 /* Add OP_CursorHint opcodes */ #define SQLITE_Stat4 0x00000800 /* Use STAT4 data */ /* TH3 expects this value ^^^^^^^^^^ to be 0x0000800. Don't change it */ -#define SQLITE_PushDown 0x00001000 /* The push-down optimization */ +#define SQLITE_PushDown 0x00001000 /* WHERE-clause push-down opt */ #define SQLITE_SimplifyJoin 0x00002000 /* Convert LEFT JOIN to JOIN */ #define SQLITE_SkipScan 0x00004000 /* Skip-scans */ #define SQLITE_PropagateConst 0x00008000 /* The constant propagation opt */ @@ -18371,8 +18453,7 @@ struct Table { #define TF_HasStored 0x00000040 /* Has one or more STORED columns */ #define TF_HasGenerated 0x00000060 /* Combo: HasVirtual + HasStored */ #define TF_WithoutRowid 0x00000080 /* No rowid. PRIMARY KEY is the key */ -#define TF_StatsUsed 0x00000100 /* Query planner decisions affected by - ** Index.aiRowLogEst[] values */ +#define TF_MaybeReanalyze 0x00000100 /* Maybe run ANALYZE on this table */ #define TF_NoVisibleRowid 0x00000200 /* No user-visible "rowid" column */ #define TF_OOOHidden 0x00000400 /* Out-of-Order hidden columns */ #define TF_HasNotNull 0x00000800 /* Contains NOT NULL constraints */ @@ -18428,6 +18509,15 @@ struct Table { #define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0) #define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0) +/* Macro is true if the SQLITE_ALLOW_ROWID_IN_VIEW (mis-)feature is +** available. By default, this macro is false +*/ +#ifndef SQLITE_ALLOW_ROWID_IN_VIEW +# define ViewCanHaveRowid 0 +#else +# define ViewCanHaveRowid (sqlite3Config.mNoVisibleRowid==0) +#endif + /* ** Each foreign key constraint is an instance of the following structure. 
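
The ViewCanHaveRowid macro above is the internal face of the SQLITE_CONFIG_ROWID_IN_VIEW option documented earlier. A minimal sketch of querying and disabling the capability from application code, following that documentation (only meaningful in builds compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW):

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      int flag = -1;   /* values other than 0 or 1 only query the setting */
      /* Like other sqlite3_config() calls, this must run before
      ** sqlite3_initialize() or any database connection is opened. */
      sqlite3_config(SQLITE_CONFIG_ROWID_IN_VIEW, &flag);
      printf("rowid-in-view: %s\n", flag ? "on" : "off");

      flag = 0;        /* 0 switches the capability off, 1 switches it on */
      sqlite3_config(SQLITE_CONFIG_ROWID_IN_VIEW, &flag);
      /* flag is rewritten with the setting now in effect; it is always 0
      ** in builds made without -DSQLITE_ALLOW_ROWID_IN_VIEW. */
      return 0;
    }
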
** @@ -19163,10 +19253,12 @@ struct IdList { ** ** Union member validity: ** -** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc -** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy -** u2.pIBIndex fg.isIndexedBy && !fg.isCte -** u2.pCteUse fg.isCte && !fg.isIndexedBy +** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc +** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy +** u1.nRow !fg.isTabFunc && !fg.isIndexedBy +** +** u2.pIBIndex fg.isIndexedBy && !fg.isCte +** u2.pCteUse fg.isCte && !fg.isIndexedBy */ struct SrcItem { Schema *pSchema; /* Schema to which this item is fixed */ @@ -19194,6 +19286,7 @@ struct SrcItem { unsigned isOn :1; /* u3.pOn was once valid and non-NULL */ unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */ + unsigned rowidUsed :1; /* The ROWID of this table is referenced */ } fg; int iCursor; /* The VDBE cursor number used to access this table */ union { @@ -19204,6 +19297,7 @@ struct SrcItem { union { char *zIndexedBy; /* Identifier from "INDEXED BY " clause */ ExprList *pFuncArg; /* Arguments to table-valued-function */ + u32 nRow; /* Number of rows in a VALUES clause */ } u1; union { Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ @@ -19268,7 +19362,7 @@ struct SrcList { #define WHERE_AGG_DISTINCT 0x0400 /* Query is "SELECT agg(DISTINCT ...)" */ #define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */ #define WHERE_RIGHT_JOIN 0x1000 /* Processing a RIGHT JOIN */ - /* 0x2000 not currently used */ +#define WHERE_KEEP_ALL_JOINS 0x2000 /* Do not do the omit-noop-join opt */ #define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */ /* 0x8000 not currently used */ @@ -19347,6 +19441,7 @@ struct NameContext { #define NC_InAggFunc 0x020000 /* True if analyzing arguments to an agg func */ #define NC_FromDDL 0x040000 /* SQL text comes from sqlite_schema */ #define NC_NoSelect 0x080000 /* Do not descend into sub-selects */ +#define NC_Where 0x100000 /* Processing WHERE clause of a SELECT */ #define NC_OrderAgg 0x8000000 /* Has an aggregate other than count/min/max */ /* @@ -19370,6 +19465,7 @@ struct Upsert { Expr *pUpsertWhere; /* WHERE clause for the ON CONFLICT UPDATE */ Upsert *pNextUpsert; /* Next ON CONFLICT clause in the list */ u8 isDoUpdate; /* True for DO UPDATE. False for DO NOTHING */ + u8 isDup; /* True if 2nd or later with same pUpsertIdx */ /* Above this point is the parse tree for the ON CONFLICT clauses. ** The next group of fields stores intermediate data. 
*/ void *pToFree; /* Free memory when deleting the Upsert object */ @@ -19459,11 +19555,12 @@ struct Select { #define SF_View 0x0200000 /* SELECT statement is a view */ #define SF_NoopOrderBy 0x0400000 /* ORDER BY is ignored for this query */ #define SF_UFSrcCheck 0x0800000 /* Check pSrc as required by UPDATE...FROM */ -#define SF_PushDown 0x1000000 /* SELECT has be modified by push-down opt */ +#define SF_PushDown 0x1000000 /* Modified by WHERE-clause push-down opt */ #define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ #define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */ #define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ #define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ +#define SF_Correlated 0x20000000 /* True if references the outer context */ /* True if S exists and has SF_NestedFrom */ #define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0) @@ -19703,6 +19800,7 @@ struct Parse { u8 disableLookaside; /* Number of times lookaside has been disabled */ u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */ + u8 bHasWith; /* True if statement contains WITH */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ #endif @@ -20140,6 +20238,11 @@ struct Sqlite3Config { #endif #ifndef SQLITE_UNTESTABLE int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */ +#endif +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + u32 mNoVisibleRowid; /* TF_NoVisibleRowid if the ROWID_IN_VIEW + ** feature is disabled. 0 if rowids can + ** occur in views. */ #endif int bLocaltimeFault; /* True to fail localtime() calls */ int (*xAltLocaltime)(const void*,void*); /* Alternative localtime() routine */ @@ -20377,6 +20480,9 @@ struct Window { ** due to the SQLITE_SUBTYPE flag */ }; +SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList *pRow); +SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal); + #ifndef SQLITE_OMIT_WINDOWFUNC SQLITE_PRIVATE void sqlite3WindowDelete(sqlite3*, Window*); SQLITE_PRIVATE void sqlite3WindowUnlinkFromSelect(Window*); @@ -20596,10 +20702,13 @@ SQLITE_PRIVATE void sqlite3MutexWarnOnContention(sqlite3_mutex*); # define EXP754 (((u64)0x7ff)<<52) # define MAN754 ((((u64)1)<<52)-1) # define IsNaN(X) (((X)&EXP754)==EXP754 && ((X)&MAN754)!=0) +# define IsOvfl(X) (((X)&EXP754)==EXP754) SQLITE_PRIVATE int sqlite3IsNaN(double); +SQLITE_PRIVATE int sqlite3IsOverflow(double); #else -# define IsNaN(X) 0 -# define sqlite3IsNaN(X) 0 +# define IsNaN(X) 0 +# define sqlite3IsNaN(X) 0 +# define sqlite3IsOVerflow(X) 0 #endif /* @@ -20691,6 +20800,7 @@ SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int); SQLITE_PRIVATE void sqlite3Dequote(char*); SQLITE_PRIVATE void sqlite3DequoteExpr(Expr*); SQLITE_PRIVATE void sqlite3DequoteToken(Token*); +SQLITE_PRIVATE void sqlite3DequoteNumber(Parse*, Expr*); SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*); SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int); SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*); @@ -20721,7 +20831,7 @@ SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*) SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*); -SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); +SQLITE_PRIVATE int 
sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*); @@ -20944,12 +21054,10 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*); SQLITE_PRIVATE u32 sqlite3IsTrueOrFalse(const char*); SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr*); SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); +SQLITE_PRIVATE int sqlite3ExprIsConstant(Parse*,Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); -SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); -SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int); +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int,int); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif @@ -21134,7 +21242,9 @@ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...); SQLITE_PRIVATE void sqlite3Error(sqlite3*,int); SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3*); SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int); +#if !defined(SQLITE_OMIT_BLOB_LITERAL) SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n); +#endif SQLITE_PRIVATE u8 sqlite3HexToInt(int h); SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **); @@ -21445,7 +21555,7 @@ SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8); SQLITE_PRIVATE Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3*,Upsert*); SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3*,Upsert*); -SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*); +SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDoUpdate(Parse*,Upsert*,Table*,Index*,int); SQLITE_PRIVATE Upsert *sqlite3UpsertOfIndex(Upsert*,Index*); SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert*); @@ -21835,6 +21945,9 @@ static const char * const sqlite3azCompileOpt[] = { "ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN), # endif #endif +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + "ALLOW_ROWID_IN_VIEW", +#endif #ifdef SQLITE_ALLOW_URI_AUTHORITY "ALLOW_URI_AUTHORITY", #endif @@ -22854,6 +22967,9 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { #endif #ifndef SQLITE_UNTESTABLE 0, /* xTestCallback */ +#endif +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + 0, /* mNoVisibleRowid. 
0 == allow rowid-in-view */ #endif 0, /* bLocaltimeFault */ 0, /* xAltLocaltime */ @@ -24175,13 +24291,14 @@ struct DateTime { int tz; /* Timezone offset in minutes */ double s; /* Seconds */ char validJD; /* True (1) if iJD is valid */ - char rawS; /* Raw numeric value stored in s */ char validYMD; /* True (1) if Y,M,D are valid */ char validHMS; /* True (1) if h,m,s are valid */ - char validTZ; /* True (1) if tz is valid */ - char tzSet; /* Timezone was set explicitly */ - char isError; /* An overflow has occurred */ - char useSubsec; /* Display subsecond precision */ + char nFloor; /* Days to implement "floor" */ + unsigned rawS : 1; /* Raw numeric value stored in s */ + unsigned isError : 1; /* An overflow has occurred */ + unsigned useSubsec : 1; /* Display subsecond precision */ + unsigned isUtc : 1; /* Time is known to be UTC */ + unsigned isLocal : 1; /* Time is known to be localtime */ }; @@ -24279,6 +24396,8 @@ static int parseTimezone(const char *zDate, DateTime *p){ sgn = +1; }else if( c=='Z' || c=='z' ){ zDate++; + p->isLocal = 0; + p->isUtc = 1; goto zulu_time; }else{ return c!=0; @@ -24291,7 +24410,6 @@ static int parseTimezone(const char *zDate, DateTime *p){ p->tz = sgn*(nMn + nHr*60); zulu_time: while( sqlite3Isspace(*zDate) ){ zDate++; } - p->tzSet = 1; return *zDate!=0; } @@ -24335,7 +24453,6 @@ static int parseHhMmSs(const char *zDate, DateTime *p){ p->m = m; p->s = s + ms; if( parseTimezone(zDate, p) ) return 1; - p->validTZ = (p->tz!=0)?1:0; return 0; } @@ -24382,15 +24499,40 @@ static void computeJD(DateTime *p){ p->validJD = 1; if( p->validHMS ){ p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000 + 0.5); - if( p->validTZ ){ + if( p->tz ){ p->iJD -= p->tz*60000; p->validYMD = 0; p->validHMS = 0; - p->validTZ = 0; + p->tz = 0; + p->isUtc = 1; + p->isLocal = 0; } } } +/* +** Given the YYYY-MM-DD information current in p, determine if there +** is day-of-month overflow and set nFloor to the number of days that +** would need to be subtracted from the date in order to bring the +** date back to the end of the month. +*/ +static void computeFloor(DateTime *p){ + assert( p->validYMD || p->isError ); + assert( p->D>=0 && p->D<=31 ); + assert( p->M>=0 && p->M<=12 ); + if( p->D<=28 ){ + p->nFloor = 0; + }else if( (1<<p->M) & 0x15aa ){ + p->nFloor = 0; + }else if( p->M!=2 ){ + p->nFloor = (p->D==31); + }else if( p->Y%4!=0 || (p->Y%100==0 && p->Y%400!=0) ){ + p->nFloor = p->D - 28; + }else{ + p->nFloor = p->D - 29; + } +} + /* ** Parse dates of the form ** @@ -24429,12 +24571,16 @@ static int parseYyyyMmDd(const char *zDate, DateTime *p){ p->Y = neg ? -Y : Y; p->M = M; p->D = D; - if( p->validTZ ){ + computeFloor(p); + if( p->tz ){ computeJD(p); } return 0; } + +static void clearYMD_HMS_TZ(DateTime *p); /* Forward declaration */ + /* ** Set the time to the current time reported by the VFS.
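
computeFloor() records how many days past the end of its month a tentative Y-M-D value has run; the 0x15aa mask has bits 1, 3, 5, 7, 8, 10, and 12 set, marking the 31-day months, and the stored nFloor count is consumed by the 'floor' modifier added in the next hunk. A small sketch of the observable behavior, assuming a build that includes these date changes:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Jan 31 plus one month is the nonexistent Feb 31: the default
    ** ('ceiling') behavior rolls forward into March, while 'floor'
    ** uses nFloor to roll back to the end of February. */
    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt;
      sqlite3_open(":memory:", &db);
      sqlite3_prepare_v2(db,
          "SELECT date('2024-01-31','+1 month'),"           /* 2024-03-02 */
          "       date('2024-01-31','+1 month','floor')",   /* 2024-02-29 */
          -1, &pStmt, 0);
      if( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("ceiling=%s floor=%s\n",
               (const char*)sqlite3_column_text(pStmt,0),
               (const char*)sqlite3_column_text(pStmt,1));
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }
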
** @@ -24444,6 +24590,9 @@ static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ p->iJD = sqlite3StmtCurrentTime(context); if( p->iJD>0 ){ p->validJD = 1; + p->isUtc = 1; + p->isLocal = 0; + clearYMD_HMS_TZ(p); return 0; }else{ return 1; @@ -24582,7 +24731,7 @@ static void computeYMD_HMS(DateTime *p){ static void clearYMD_HMS_TZ(DateTime *p){ p->validYMD = 0; p->validHMS = 0; - p->validTZ = 0; + p->tz = 0; } #ifndef SQLITE_OMIT_LOCALTIME @@ -24714,7 +24863,7 @@ static int toLocaltime( p->validHMS = 1; p->validJD = 0; p->rawS = 0; - p->validTZ = 0; + p->tz = 0; p->isError = 0; return SQLITE_OK; } @@ -24734,12 +24883,12 @@ static const struct { float rLimit; /* Maximum NNN value for this transform */ float rXform; /* Constant used for this transform */ } aXformType[] = { - { 6, "second", 4.6427e+14, 1.0 }, - { 6, "minute", 7.7379e+12, 60.0 }, - { 4, "hour", 1.2897e+11, 3600.0 }, - { 3, "day", 5373485.0, 86400.0 }, - { 5, "month", 176546.0, 2592000.0 }, - { 4, "year", 14713.0, 31536000.0 }, + /* 0 */ { 6, "second", 4.6427e+14, 1.0 }, + /* 1 */ { 6, "minute", 7.7379e+12, 60.0 }, + /* 2 */ { 4, "hour", 1.2897e+11, 3600.0 }, + /* 3 */ { 3, "day", 5373485.0, 86400.0 }, + /* 4 */ { 5, "month", 176546.0, 30.0*86400.0 }, + /* 5 */ { 4, "year", 14713.0, 365.0*86400.0 }, }; /* @@ -24771,14 +24920,20 @@ static void autoAdjustDate(DateTime *p){ ** NNN.NNNN seconds ** NNN months ** NNN years +** +/-YYYY-MM-DD HH:MM:SS.SSS +** ceiling +** floor ** start of month ** start of year ** start of week ** start of day ** weekday N ** unixepoch +** auto ** localtime ** utc +** subsec +** subsecond ** ** Return 0 on success and 1 if there is any kind of error. If the error ** is in a system call (i.e. localtime()), then an error message is written @@ -24809,6 +24964,37 @@ static int parseModifier( } break; } + case 'c': { + /* + ** ceiling + ** + ** Resolve day-of-month overflow by rolling forward into the next + ** month. As this is the default action, this modifier is really + ** a no-op that is only included for symmetry. See "floor". + */ + if( sqlite3_stricmp(z, "ceiling")==0 ){ + computeJD(p); + clearYMD_HMS_TZ(p); + rc = 0; + p->nFloor = 0; + } + break; + } + case 'f': { + /* + ** floor + ** + ** Resolve day-of-month overflow by rolling back to the end of the + ** previous month. + */ + if( sqlite3_stricmp(z, "floor")==0 ){ + computeJD(p); + p->iJD -= p->nFloor*86400000; + clearYMD_HMS_TZ(p); + rc = 0; + } + break; + } case 'j': { /* ** julianday @@ -24835,7 +25021,9 @@ static int parseModifier( ** show local time. */ if( sqlite3_stricmp(z, "localtime")==0 && sqlite3NotPureFunc(pCtx) ){ - rc = toLocaltime(p, pCtx); + rc = p->isLocal ? 
SQLITE_OK : toLocaltime(p, pCtx); + p->isUtc = 0; + p->isLocal = 1; } break; } @@ -24860,7 +25048,7 @@ static int parseModifier( } #ifndef SQLITE_OMIT_LOCALTIME else if( sqlite3_stricmp(z, "utc")==0 && sqlite3NotPureFunc(pCtx) ){ - if( p->tzSet==0 ){ + if( p->isUtc==0 ){ i64 iOrigJD; /* Original localtime */ i64 iGuess; /* Guess at the corresponding utc time */ int cnt = 0; /* Safety to prevent infinite loop */ @@ -24883,7 +25071,8 @@ static int parseModifier( memset(p, 0, sizeof(*p)); p->iJD = iGuess; p->validJD = 1; - p->tzSet = 1; + p->isUtc = 1; + p->isLocal = 0; } rc = SQLITE_OK; } @@ -24903,7 +25092,7 @@ static int parseModifier( && r>=0.0 && r<7.0 && (n=(int)r)==r ){ sqlite3_int64 Z; computeYMD_HMS(p); - p->validTZ = 0; + p->tz = 0; p->validJD = 0; computeJD(p); Z = ((p->iJD + 129600000)/86400000) % 7; @@ -24943,7 +25132,7 @@ static int parseModifier( p->h = p->m = 0; p->s = 0.0; p->rawS = 0; - p->validTZ = 0; + p->tz = 0; p->validJD = 0; if( sqlite3_stricmp(z,"month")==0 ){ p->D = 1; @@ -25014,6 +25203,7 @@ static int parseModifier( x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; p->Y += x; p->M -= x*12; + computeFloor(p); computeJD(p); p->validHMS = 0; p->validYMD = 0; @@ -25060,11 +25250,12 @@ static int parseModifier( z += n; while( sqlite3Isspace(*z) ) z++; n = sqlite3Strlen30(z); - if( n>10 || n<3 ) break; + if( n<3 || n>10 ) break; if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--; computeJD(p); assert( rc==1 ); rRounder = r<0 ? -0.5 : +0.5; + p->nFloor = 0; for(i=0; iM += (int)r; x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; p->Y += x; p->M -= x*12; + computeFloor(p); p->validJD = 0; r -= (int)r; break; } case 5: { /* Special processing to add years */ int y = (int)r; - assert( strcmp(aXformType[i].zName,"year")==0 ); + assert( strcmp(aXformType[5].zName,"year")==0 ); computeYMD_HMS(p); + assert( p->M>=0 && p->M<=12 ); p->Y += y; + computeFloor(p); p->validJD = 0; r -= (int)r; break; @@ -25340,22 +25534,83 @@ static void dateFunc( } } +/* +** Compute the number of days after the most recent January 1. +** +** In other words, compute the zero-based day number for the +** current year: +** +** Jan01 = 0, Jan02 = 1, ..., Jan31 = 30, Feb01 = 31, ... +** Dec31 = 364 or 365. +*/ +static int daysAfterJan01(DateTime *pDate){ + DateTime jan01 = *pDate; + assert( jan01.validYMD ); + assert( jan01.validHMS ); + assert( pDate->validJD ); + jan01.validJD = 0; + jan01.M = 1; + jan01.D = 1; + computeJD(&jan01); + return (int)((pDate->iJD-jan01.iJD+43200000)/86400000); +} + +/* +** Return the number of days after the most recent Monday. +** +** In other words, return the day of the week according +** to this code: +** +** 0=Monday, 1=Tuesday, 2=Wednesday, ..., 6=Sunday. +*/ +static int daysAfterMonday(DateTime *pDate){ + assert( pDate->validJD ); + return (int)((pDate->iJD+43200000)/86400000) % 7; +} + +/* +** Return the number of days after the most recent Sunday. +** +** In other words, return the day of the week according +** to this code: +** +** 0=Sunday, 1=Monday, 2=Tues, ..., 6=Saturday +*/ +static int daysAfterSunday(DateTime *pDate){ + assert( pDate->validJD ); + return (int)((pDate->iJD+129600000)/86400000) % 7; +} + /* ** strftime( FORMAT, TIMESTRING, MOD, MOD, ...) ** ** Return a string described by FORMAT. Conversions as follows: ** -** %d day of month +** %d day of month 01-31 +** %e day of month 1-31 ** %f ** fractional seconds SS.SSS +** %F ISO date. YYYY-MM-DD +** %G ISO year corresponding to %V 0000-9999. 
+** %g 2-digit ISO year corresponding to %V 00-99 ** %H hour 00-24 -** %j day of year 000-366 +** %k hour 0-24 (leading zero converted to space) +** %I hour 01-12 +** %j day of year 001-366 ** %J ** julian day number +** %l hour 1-12 (leading zero converted to space) ** %m month 01-12 ** %M minute 00-59 +** %p "am" or "pm" +** %P "AM" or "PM" +** %R time as HH:MM ** %s seconds since 1970-01-01 ** %S seconds 00-59 -** %w day of week 0-6 Sunday==0 -** %W week of year 00-53 +** %T time as HH:MM:SS +** %u day of week 1-7 Monday==1, Sunday==7 +** %w day of week 0-6 Sunday==0, Monday==1 +** %U week of year 00-53 (First Sunday is start of week 01) +** %V week of year 01-53 (First week containing Thursday is week 01) +** %W week of year 00-53 (First Monday is start of week 01) ** %Y year 0000-9999 ** %% % */ @@ -25392,7 +25647,7 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D); break; } - case 'f': { + case 'f': { /* Fractional seconds. (Non-standard) */ double s = x.s; if( s>59.999 ) s = 59.999; sqlite3_str_appendf(&sRes, "%06.3f", s); @@ -25402,6 +25657,21 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D); break; } + case 'G': /* Fall thru */ + case 'g': { + DateTime y = x; + assert( y.validJD ); + /* Move y so that it is the Thursday in the same week as x */ + y.iJD += (3 - daysAfterMonday(&x))*86400000; + y.validYMD = 0; + computeYMD(&y); + if( cf=='g' ){ + sqlite3_str_appendf(&sRes, "%02d", y.Y%100); + }else{ + sqlite3_str_appendf(&sRes, "%04d", y.Y); + } + break; + } case 'H': case 'k': { sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h); @@ -25415,25 +25685,11 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h); break; } - case 'W': /* Fall thru */ - case 'j': { - int nDay; /* Number of days since 1st day of year */ - DateTime y = x; - y.validJD = 0; - y.M = 1; - y.D = 1; - computeJD(&y); - nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( cf=='W' ){ - int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ - wd = (int)(((x.iJD+43200000)/86400000)%7); - sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7); - }else{ - sqlite3_str_appendf(&sRes,"%03d",nDay+1); - } + case 'j': { /* Day of year. Jan01==1, Jan02==2, and so forth */ + sqlite3_str_appendf(&sRes,"%03d",daysAfterJan01(&x)+1); break; } - case 'J': { + case 'J': { /* Julian day number. (Non-standard) */ sqlite3_str_appendf(&sRes,"%.16g",x.iJD/86400000.0); break; } @@ -25476,13 +25732,33 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s); break; } - case 'u': /* Fall thru */ - case 'w': { - char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + case 'u': /* Day of week. 1 to 7. Monday==1, Sunday==7 */ + case 'w': { /* Day of week. 0 to 6. Sunday==0, Monday==1 */ + char c = (char)daysAfterSunday(&x) + '0'; if( c=='0' && cf=='u' ) c = '7'; sqlite3_str_appendchar(&sRes, 1, c); break; } + case 'U': { /* Week num. 00-53. First Sun of the year is week 01 */ + sqlite3_str_appendf(&sRes,"%02d", + (daysAfterJan01(&x)-daysAfterSunday(&x)+7)/7); + break; + } + case 'V': { /* Week num. 01-53. First week with a Thur is week 01 */ + DateTime y = x; + /* Adjust y so that is the Thursday in the same week as x */ + assert( y.validJD ); + y.iJD += (3 - daysAfterMonday(&x))*86400000; + y.validYMD = 0; + computeYMD(&y); + sqlite3_str_appendf(&sRes,"%02d", daysAfterJan01(&y)/7+1); + break; + } + case 'W': { /* Week num. 00-53. 
First Mon of the year is week 01 */ + sqlite3_str_appendf(&sRes,"%02d", + (daysAfterJan01(&x)-daysAfterMonday(&x)+7)/7); + break; + } case 'Y': { sqlite3_str_appendf(&sRes,"%04d",x.Y); break; @@ -25629,9 +25905,7 @@ static void timediffFunc( d1.iJD = d2.iJD - d1.iJD; d1.iJD += (u64)1486995408 * (u64)100000; } - d1.validYMD = 0; - d1.validHMS = 0; - d1.validTZ = 0; + clearYMD_HMS_TZ(&d1); computeYMD_HMS(&d1); sqlite3StrAccumInit(&sRes, 0, 0, 0, 100); sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f", @@ -25700,6 +25974,36 @@ static void currentTimeFunc( } #endif +#if !defined(SQLITE_OMIT_DATETIME_FUNCS) && defined(SQLITE_DEBUG) +/* +** datedebug(...) +** +** This routine returns JSON that describes the internal DateTime object. +** Used for debugging and testing only. Subject to change. +*/ +static void datedebugFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + DateTime x; + if( isDate(context, argc, argv, &x)==0 ){ + char *zJson; + zJson = sqlite3_mprintf( + "{iJD:%lld,Y:%d,M:%d,D:%d,h:%d,m:%d,tz:%d," + "s:%.3f,validJD:%d,validYMS:%d,validHMS:%d," + "nFloor:%d,rawS:%d,isError:%d,useSubsec:%d," + "isUtc:%d,isLocal:%d}", + x.iJD, x.Y, x.M, x.D, x.h, x.m, x.tz, + x.s, x.validJD, x.validYMD, x.validHMS, + x.nFloor, x.rawS, x.isError, x.useSubsec, + x.isUtc, x.isLocal); + sqlite3_result_text(context, zJson, -1, sqlite3_free); + } +} +#endif /* !SQLITE_OMIT_DATETIME_FUNCS && SQLITE_DEBUG */ + + /* ** This function registered all of the above C functions as SQL ** functions. This should be the only routine in this file with @@ -25715,6 +26019,9 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){ PURE_DATE(datetime, -1, 0, 0, datetimeFunc ), PURE_DATE(strftime, -1, 0, 0, strftimeFunc ), PURE_DATE(timediff, 2, 0, 0, timediffFunc ), +#ifdef SQLITE_DEBUG + PURE_DATE(datedebug, -1, 0, 0, datedebugFunc ), +#endif DFUNCTION(current_time, 0, 0, 0, ctimeFunc ), DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), DFUNCTION(current_date, 0, 0, 0, cdateFunc ), @@ -30130,6 +30437,24 @@ static void sqlite3MallocAlarm(int nByte){ sqlite3_mutex_enter(mem0.mutex); } +#ifdef SQLITE_DEBUG +/* +** This routine is called whenever an out-of-memory condition is seen, +** It's only purpose to to serve as a breakpoint for gdb or similar +** code debuggers when working on out-of-memory conditions, for example +** caused by PRAGMA hard_heap_limit=N. +*/ +static SQLITE_NOINLINE void test_oom_breakpoint(u64 n){ + static u64 nOomFault = 0; + nOomFault += n; + /* The assert() is never reached in a human lifetime. It is here mostly + ** to prevent code optimizers from optimizing out this function. */ + assert( (nOomFault>>32) < 0xffffffff ); +} +#else +# define test_oom_breakpoint(X) /* No-op for production builds */ +#endif + /* ** Do a memory allocation with statistics and alarms. Assume the ** lock is already held. 
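
Returning to the strftime() week-number conversions above: %U, %V, and %W differ only in which weekday anchors the count, and the daysAfterJan01()/daysAfterMonday()/daysAfterSunday() helpers make that explicit. A small check (plain C, not part of the diff) for 2024-01-01, which fell on a Monday:

    #include <stdio.h>
    #include "sqlite3.h"

    /* %W (Monday-start weeks) reports 01, %U (Sunday-start weeks)
    ** reports 00, and ISO %V reports 01 because the week contains a
    ** Thursday (Jan 4); %G is the matching ISO year. */
    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt;
      sqlite3_open(":memory:", &db);
      sqlite3_prepare_v2(db,
          "SELECT strftime('%U %V %W %G','2024-01-01')", -1, &pStmt, 0);
      if( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s\n", (const char*)sqlite3_column_text(pStmt,0)); /* 00 01 01 2024 */
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }
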
@@ -30156,6 +30481,7 @@ static void mallocWithAlarm(int n, void **pp){ if( mem0.hardLimit ){ nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); if( nUsed >= mem0.hardLimit - nFull ){ + test_oom_breakpoint(1); *pp = 0; return; } @@ -30444,6 +30770,7 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){ sqlite3MallocAlarm(nDiff); if( mem0.hardLimit>0 && nUsed >= mem0.hardLimit - nDiff ){ sqlite3_mutex_leave(mem0.mutex); + test_oom_breakpoint(1); return 0; } } @@ -31310,6 +31637,7 @@ SQLITE_API void sqlite3_str_vappendf( if( xtype==etFLOAT ){ iRound = -precision; }else if( xtype==etGENERIC ){ + if( precision==0 ) precision = 1; iRound = precision; }else{ iRound = precision+1; @@ -31345,13 +31673,14 @@ SQLITE_API void sqlite3_str_vappendf( } exp = s.iDP-1; - if( xtype==etGENERIC && precision>0 ) precision--; /* ** If the field type is etGENERIC, then convert to either etEXP ** or etFLOAT, as appropriate. */ if( xtype==etGENERIC ){ + assert( precision>0 ); + precision--; flag_rtz = !flag_alternateform; if( exp<-4 || exp>precision ){ xtype = etEXP; @@ -31667,9 +31996,13 @@ SQLITE_API void sqlite3_str_vappendf( sqlite3_str_appendall(pAccum, pItem->zAlias); }else{ Select *pSel = pItem->pSelect; - assert( pSel!=0 ); + assert( pSel!=0 ); /* Because of tag-20240424-1 */ if( pSel->selFlags & SF_NestedFrom ){ sqlite3_str_appendf(pAccum, "(join-%u)", pSel->selId); + }else if( pSel->selFlags & SF_MultiValue ){ + assert( !pItem->fg.isTabFunc && !pItem->fg.isIndexedBy ); + sqlite3_str_appendf(pAccum, "%u-ROW VALUES CLAUSE", + pItem->u1.nRow); }else{ sqlite3_str_appendf(pAccum, "(subquery-%u)", pSel->selId); } @@ -32446,8 +32779,10 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc) x.printfFlags |= SQLITE_PRINTF_INTERNAL; sqlite3_str_appendf(&x, "{%d:*} %!S", pItem->iCursor, pItem); if( pItem->pTab ){ - sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx", - pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, pItem->colUsed); + sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx%s", + pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, + pItem->colUsed, + pItem->fg.rowidUsed ? "+rowid" : ""); } if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==(JT_LEFT|JT_RIGHT) ){ sqlite3_str_appendf(&x, " FULL-OUTER-JOIN"); @@ -32487,12 +32822,14 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc) sqlite3TreeViewIdList(pView, pItem->u3.pUsing, (--n)>0, "USING"); } if( pItem->pSelect ){ + sqlite3TreeViewPush(&pView, i+1nSrc); if( pItem->pTab ){ Table *pTab = pItem->pTab; sqlite3TreeViewColumnList(pView, pTab->aCol, pTab->nCol, 1); } assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) ); sqlite3TreeViewSelect(pView, pItem->pSelect, (--n)>0); + sqlite3TreeViewPop(&pView); } if( pItem->fg.isTabFunc ){ sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:"); @@ -32596,7 +32933,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m sqlite3TreeViewItem(pView, "LIMIT", (n--)>0); sqlite3TreeViewExpr(pView, p->pLimit->pLeft, p->pLimit->pRight!=0); if( p->pLimit->pRight ){ - sqlite3TreeViewItem(pView, "OFFSET", (n--)>0); + sqlite3TreeViewItem(pView, "OFFSET", 0); sqlite3TreeViewExpr(pView, p->pLimit->pRight, 0); sqlite3TreeViewPop(&pView); } @@ -34641,6 +34978,19 @@ SQLITE_PRIVATE int sqlite3IsNaN(double x){ } #endif /* SQLITE_OMIT_FLOATING_POINT */ +#ifndef SQLITE_OMIT_FLOATING_POINT +/* +** Return true if the floating point value is NaN or +Inf or -Inf. 
+*/ +SQLITE_PRIVATE int sqlite3IsOverflow(double x){ + int rc; /* The value return */ + u64 y; + memcpy(&y,&x,sizeof(y)); + rc = IsOvfl(y); + return rc; +} +#endif /* SQLITE_OMIT_FLOATING_POINT */ + /* ** Compute a string length that is limited to what can be stored in ** lower 30 bits of a 32-bit signed integer. @@ -34884,6 +35234,44 @@ SQLITE_PRIVATE void sqlite3DequoteExpr(Expr *p){ sqlite3Dequote(p->u.zToken); } +/* +** Expression p is a QNUMBER (quoted number). Dequote the value in p->u.zToken +** and set the type to INTEGER or FLOAT. "Quoted" integers or floats are those +** that contain '_' characters that must be removed before further processing. +*/ +SQLITE_PRIVATE void sqlite3DequoteNumber(Parse *pParse, Expr *p){ + assert( p!=0 || pParse->db->mallocFailed ); + if( p ){ + const char *pIn = p->u.zToken; + char *pOut = p->u.zToken; + int bHex = (pIn[0]=='0' && (pIn[1]=='x' || pIn[1]=='X')); + int iValue; + assert( p->op==TK_QNUMBER ); + p->op = TK_INTEGER; + do { + if( *pIn!=SQLITE_DIGIT_SEPARATOR ){ + *pOut++ = *pIn; + if( *pIn=='e' || *pIn=='E' || *pIn=='.' ) p->op = TK_FLOAT; + }else{ + if( (bHex==0 && (!sqlite3Isdigit(pIn[-1]) || !sqlite3Isdigit(pIn[1]))) + || (bHex==1 && (!sqlite3Isxdigit(pIn[-1]) || !sqlite3Isxdigit(pIn[1]))) + ){ + sqlite3ErrorMsg(pParse, "unrecognized token: \"%s\"", p->u.zToken); + } + } + }while( *pIn++ ); + if( bHex ) p->op = TK_INTEGER; + + /* tag-20240227-a: If after dequoting, the number is an integer that + ** fits in 32 bits, then it must be converted into EP_IntValue. Other + ** parts of the code expect this. See also tag-20240227-b. */ + if( p->op==TK_INTEGER && sqlite3GetInt32(p->u.zToken, &iValue) ){ + p->u.iValue = iValue; + p->flags |= EP_IntValue; + } + } +} + /* ** If the input token p is quoted, try to adjust the token to remove ** the quotes. This is not always possible: @@ -35200,6 +35588,9 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en u64 s2; rr[0] = (double)s; s2 = (u64)rr[0]; +#if defined(_MSC_VER) && _MSC_VER<1700 + if( s2==0x8000000000000000LL ){ s2 = 2*(u64)(0.5*rr[0]); } +#endif rr[1] = s>=s2 ? (double)(s - s2) : -(double)(s2 - s); if( e>0 ){ while( e>=100 ){ @@ -35642,7 +36033,7 @@ SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRou assert( p->n>0 ); assert( p->nzBuf) ); p->iDP = p->n + exp; - if( iRound<0 ){ + if( iRound<=0 ){ iRound = p->iDP - iRound; if( iRound==0 && p->zBuf[i+1]>='5' ){ iRound = 1; @@ -36820,7 +37211,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 30 */ "SeekRowid" OpHelp("intkey=r[P3]"), /* 31 */ "NotExists" OpHelp("intkey=r[P3]"), /* 32 */ "Last" OpHelp(""), - /* 33 */ "IfSmaller" OpHelp(""), + /* 33 */ "IfSizeBetween" OpHelp(""), /* 34 */ "SorterSort" OpHelp(""), /* 35 */ "Sort" OpHelp(""), /* 36 */ "Rewind" OpHelp(""), @@ -36865,7 +37256,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 75 */ "Null" OpHelp("r[P2..P3]=NULL"), /* 76 */ "SoftNull" OpHelp("r[P1]=NULL"), /* 77 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), - /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), + /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1)"), /* 79 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), /* 80 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), /* 81 */ "SCopy" OpHelp("r[P2]=r[P1]"), @@ -39263,8 +39654,12 @@ static int unixLogErrorAtLine( ** available, the error message will often be an empty string. Not a ** huge problem. Incorrectly concluding that the GNU version is available ** could lead to a segfault though. 
+ ** + ** Forum post 3f13857fa4062301 reports that the Android SDK may use + ** int-type return, depending on its version. */ -#if defined(STRERROR_R_CHAR_P) || defined(__USE_GNU) +#if (defined(STRERROR_R_CHAR_P) || defined(__USE_GNU)) \ + && !defined(ANDROID) && !defined(__ANDROID__) zErr = # endif strerror_r(iErrno, aErr, sizeof(aErr)-1); @@ -44362,12 +44757,19 @@ static int unixOpen( rc = SQLITE_READONLY_DIRECTORY; }else if( errno!=EISDIR && isReadWrite ){ /* Failed to open the file for read/write access. Try read-only. */ + UnixUnusedFd *pReadonly = 0; flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); openFlags &= ~(O_RDWR|O_CREAT); flags |= SQLITE_OPEN_READONLY; openFlags |= O_RDONLY; isReadonly = 1; - fd = robust_open(zName, openFlags, openMode); + pReadonly = findReusableFd(zName, flags); + if( pReadonly ){ + fd = pReadonly->fd; + sqlite3_free(pReadonly); + }else{ + fd = robust_open(zName, openFlags, openMode); + } } } if( fd<0 ){ @@ -53263,6 +53665,14 @@ SQLITE_API unsigned char *sqlite3_serialize( pOut = 0; }else{ sz = sqlite3_column_int64(pStmt, 0)*szPage; + if( sz==0 ){ + sqlite3_reset(pStmt); + sqlite3_exec(db, "BEGIN IMMEDIATE; COMMIT;", 0, 0, 0); + rc = sqlite3_step(pStmt); + if( rc==SQLITE_ROW ){ + sz = sqlite3_column_int64(pStmt, 0)*szPage; + } + } if( piSize ) *piSize = sz; if( mFlags & SQLITE_SERIALIZE_NOCOPY ){ pOut = 0; @@ -63786,7 +64196,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager *pPager){ ** This will be either the rollback journal or the WAL file. */ SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager *pPager){ -#if SQLITE_OMIT_WAL +#ifdef SQLITE_OMIT_WAL return pPager->jfd; #else return pPager->pWal ? sqlite3WalFile(pPager->pWal) : pPager->jfd; @@ -69810,6 +70220,7 @@ struct IntegrityCk { StrAccum errMsg; /* Accumulate the error message text here */ u32 *heap; /* Min-heap used for analyzing cell coverage */ sqlite3 *db; /* Database connection running the check */ + i64 nRow; /* Number of rows visited in current tree */ }; /* @@ -70284,8 +70695,47 @@ int corruptPageError(int lineno, MemPage *p){ # define SQLITE_CORRUPT_PAGE(pMemPage) SQLITE_CORRUPT_PGNO(pMemPage->pgno) #endif +/* Default value for SHARED_LOCK_TRACE macro if shared-cache is disabled +** or if the lock tracking is disabled. This is always the value for +** release builds. +*/ +#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) /*no-op*/ + #ifndef SQLITE_OMIT_SHARED_CACHE +#if 0 +/* ^---- Change to 1 and recompile to enable shared-lock tracing +** for debugging purposes. +** +** Print all shared-cache locks on a BtShared. Debugging use only. +*/ +static void sharedLockTrace( + BtShared *pBt, + const char *zMsg, + int iRoot, + int eLockType +){ + BtLock *pLock; + if( iRoot>0 ){ + printf("%s-%p %u%s:", zMsg, pBt, iRoot, eLockType==READ_LOCK?"R":"W"); + }else{ + printf("%s-%p:", zMsg, pBt); + } + for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){ + printf(" %p/%u%s", pLock->pBtree, pLock->iTable, + pLock->eLock==READ_LOCK ? "R" : "W"); + while( pLock->pNext && pLock->pBtree==pLock->pNext->pBtree ){ + pLock = pLock->pNext; + printf(",%u%s", pLock->iTable, pLock->eLock==READ_LOCK ? "R" : "W"); + } + } + printf("\n"); + fflush(stdout); +} +#undef SHARED_LOCK_TRACE +#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) sharedLockTrace(X,MSG,TAB,TYPE) +#endif /* Shared-lock tracing */ + #ifdef SQLITE_DEBUG /* **** This function is only used as part of an assert() statement. 
*** @@ -70362,6 +70812,8 @@ static int hasSharedCacheTableLock( iTab = iRoot; } + SHARED_LOCK_TRACE(pBtree->pBt,"hasLock",iRoot,eLockType); + /* Search for the required lock. Either a write-lock on root-page iTab, a ** write-lock on the schema table, or (if the client is reading) a ** read-lock on iTab will suffice. Return 1 if any of these are found. */ @@ -70495,6 +70947,8 @@ static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){ BtLock *pLock = 0; BtLock *pIter; + SHARED_LOCK_TRACE(pBt,"setLock", iTable, eLock); + assert( sqlite3BtreeHoldsMutex(p) ); assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); assert( p->db!=0 ); @@ -70562,6 +71016,8 @@ static void clearAllSharedCacheTableLocks(Btree *p){ assert( p->sharable || 0==*ppIter ); assert( p->inTrans>0 ); + SHARED_LOCK_TRACE(pBt, "clearAllLocks", 0, 0); + while( *ppIter ){ BtLock *pLock = *ppIter; assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree ); @@ -70600,6 +71056,9 @@ static void downgradeAllSharedCacheTableLocks(Btree *p){ BtShared *pBt = p->pBt; + + SHARED_LOCK_TRACE(pBt, "downgradeLocks", 0, 0); + if( pBt->pWriter==p ){ BtLock *pLock; pBt->pWriter = 0; @@ -75213,9 +75672,12 @@ static int accessPayload( if( pCur->aOverflow==0 || nOvfl*(int)sizeof(Pgno) > sqlite3MallocSize(pCur->aOverflow) ){ - Pgno *aNew = (Pgno*)sqlite3Realloc( - pCur->aOverflow, nOvfl*2*sizeof(Pgno) - ); + Pgno *aNew; + if( sqlite3FaultSim(413) ){ + aNew = 0; + }else{ + aNew = (Pgno*)sqlite3Realloc(pCur->aOverflow, nOvfl*2*sizeof(Pgno)); + } if( aNew==0 ){ return SQLITE_NOMEM_BKPT; }else{ @@ -75225,6 +75687,12 @@ static int accessPayload( memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno)); pCur->curFlags |= BTCF_ValidOvfl; }else{ + /* Sanity check the validity of the overflow page cache */ + assert( pCur->aOverflow[0]==nextPage + || pCur->aOverflow[0]==0 + || CORRUPT_DB ); + assert( pCur->aOverflow[0]!=0 || pCur->aOverflow[offset/ovflSize]==0 ); + /* If the overflow page-list cache has been allocated and the ** entry for the first required overflow page is valid, skip ** directly to it. @@ -75706,6 +76174,23 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ return rc; } +#ifdef SQLITE_DEBUG +/* The cursor is CURSOR_VALID and has BTCF_AtLast set. Verify that +** these flags are true for a consistent database. +** +** This routine is called from within assert() statements only. +** It is an internal verification routine and does not appear in production +** builds. +*/ +static int cursorIsAtLastEntry(BtCursor *pCur){ + int ii; + for(ii=0; ii<pCur->iPage; ii++){ + if( pCur->aiIdx[ii]!=pCur->apPage[ii]->nCell ) return 0; + } + return pCur->ix==pCur->pPage->nCell-1 && pCur->pPage->leaf!=0; +} +#endif + /* Move the cursor to the last entry in the table. Return SQLITE_OK ** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. @@ -75734,18 +76219,7 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ /* If the cursor already points to the last entry, this is a no-op. */ if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){ -#ifdef SQLITE_DEBUG - /* This block serves to assert() that the cursor really does point - ** to the last entry in the b-tree.
*/ - int ii; - for(ii=0; iiiPage; ii++){ - assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell ); - } - assert( pCur->ix==pCur->pPage->nCell-1 || CORRUPT_DB ); - testcase( pCur->ix!=pCur->pPage->nCell-1 ); - /* ^-- dbsqlfuzz b92b72e4de80b5140c30ab71372ca719b8feb618 */ - assert( pCur->pPage->leaf ); -#endif + assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB ); *pRes = 0; return SQLITE_OK; } @@ -75798,6 +76272,7 @@ SQLITE_PRIVATE int sqlite3BtreeTableMoveto( } if( pCur->info.nKeycurFlags & BTCF_AtLast)!=0 ){ + assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB ); *pRes = -1; return SQLITE_OK; } @@ -76264,10 +76739,10 @@ SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor *pCur){ assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); - /* Currently this interface is only called by the OP_IfSmaller - ** opcode, and it that case the cursor will always be valid and - ** will always point to a leaf node. */ - if( NEVER(pCur->eState!=CURSOR_VALID) ) return -1; + /* Currently this interface is only called by the OP_IfSizeBetween + ** opcode and the OP_Count opcode with P3=1. In either case, + ** the cursor will always be valid unless the btree is empty. */ + if( pCur->eState!=CURSOR_VALID ) return 0; if( NEVER(pCur->pPage->leaf==0) ) return -1; n = pCur->pPage->nCell; @@ -77089,7 +77564,10 @@ static int fillInCell( n = nHeader + nPayload; testcase( n==3 ); testcase( n==4 ); - if( n<4 ) n = 4; + if( n<4 ){ + n = 4; + pPayload[nPayload] = 0; + } *pnSize = n; assert( nSrc<=nPayload ); testcase( nSrcaData[0]!=apOld[0]->aData[0] ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pOld); goto balance_cleanup; } @@ -78419,7 +78897,7 @@ static int balance_nonroot( memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow)); if( pOld->nOverflow>0 ){ if( NEVER(limitaiOvfl[0]) ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pOld); goto balance_cleanup; } limit = pOld->aiOvfl[0]; @@ -79062,7 +79540,7 @@ static int anotherValidCursor(BtCursor *pCur){ && pOther->eState==CURSOR_VALID && pOther->pPage==pCur->pPage ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pCur->pPage); } } return SQLITE_OK; @@ -79122,7 +79600,7 @@ static int balance(BtCursor *pCur){ /* The page being written is not a root page, and there is currently ** more than one reference to it. This only happens if the page is one ** of its own ancestor pages. Corruption. */ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); }else{ MemPage * const pParent = pCur->apPage[iPage-1]; int const iIdx = pCur->aiIdx[iPage-1]; @@ -79286,7 +79764,7 @@ static SQLITE_NOINLINE int btreeOverwriteOverflowCell( rc = btreeGetPage(pBt, ovflPgno, &pPage, 0); if( rc ) return rc; if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 || pPage->isInit ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); }else{ if( iOffset+ovflPageSize<(u32)nTotal ){ ovflPgno = get4byte(pPage->aData); @@ -79314,7 +79792,7 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd || pCur->info.pPayload < pPage->aData + pPage->cellOffset ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } if( pCur->info.nLocal==nTotal ){ /* The entire cell is local */ @@ -79395,7 +79873,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** Which can only happen if the SQLITE_NoSchemaError flag was set when ** the schema was loaded. 
This cannot be asserted though, as a user might ** set the flag, load the schema, and then unset the flag. */ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot); } } @@ -79518,7 +79996,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( pPage->nFree<0 ){ if( NEVER(pCur->eState>CURSOR_INVALID) ){ /* ^^^^^--- due to the moveToRoot() call above */ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); }else{ rc = btreeComputeFreeSpace(pPage); } @@ -79535,7 +80013,10 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( flags & BTREE_PREFORMAT ){ rc = SQLITE_OK; szNew = p->pBt->nPreformatSize; - if( szNew<4 ) szNew = 4; + if( szNew<4 ){ + szNew = 4; + newCell[3] = 0; + } if( ISAUTOVACUUM(p->pBt) && szNew>pPage->maxLocal ){ CellInfo info; pPage->xParseCell(pPage, newCell, &info); @@ -79557,7 +80038,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( CellInfo info; assert( idx>=0 ); if( idx>=pPage->nCell ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ){ @@ -79584,10 +80065,10 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** necessary to add the PTRMAP_OVERFLOW1 pointer-map entry. */ assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */ if( oldCell < pPage->aData+pPage->hdrOffset+10 ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } if( oldCell+szNew > pPage->aDataEnd ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } memcpy(oldCell, newCell, szNew); return SQLITE_OK; @@ -79597,7 +80078,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( }else if( loc<0 && pPage->nCell>0 ){ assert( pPage->leaf ); idx = ++pCur->ix; - pCur->curFlags &= ~BTCF_ValidNKey; + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); }else{ assert( pPage->leaf ); } @@ -79627,7 +80108,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( */ if( pPage->nOverflow ){ assert( rc==SQLITE_OK ); - pCur->curFlags &= ~(BTCF_ValidNKey); + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); rc = balance(pCur); /* Must make sure nOverflow is reset to zero even if the balance() @@ -79689,7 +80170,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 nIn = pSrc->info.nLocal; aIn = pSrc->info.pPayload; if( aIn+nIn>pSrc->pPage->aDataEnd ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pSrc->pPage); } nRem = pSrc->info.nPayload; if( nIn==nRem && nInpPage->maxLocal ){ @@ -79714,7 +80195,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 if( nRem>nIn ){ if( aIn+nIn+4>pSrc->pPage->aDataEnd ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pSrc->pPage); } ovflIn = get4byte(&pSrc->info.pPayload[nIn]); } @@ -79810,7 +80291,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID ); if( rc || pCur->eState!=CURSOR_VALID ) return rc; }else{ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot); } } assert( pCur->eState==CURSOR_VALID ); @@ -79819,14 +80300,14 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ iCellIdx = pCur->ix; pPage = pCur->pPage; if( pPage->nCell<=iCellIdx ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } pCell = findCell(pPage, iCellIdx); if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } if( pCell<&pPage->aCellIdx[pPage->nCell] ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } /* If the 
BTREE_SAVEPOSITION bit is on, then the cursor position must @@ -79917,7 +80398,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ n = pCur->pPage->pgno; } pCell = findCell(pLeaf, pLeaf->nCell-1); - if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT; + if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_PAGE(pLeaf); nCell = pLeaf->xCellSize(pLeaf, pCell); assert( MX_CELL_SIZE(pBt) >= nCell ); pTmp = pBt->pTmpSpace; @@ -80033,7 +80514,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){ */ sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot); if( pgnoRoot>btreePagecount(pBt) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pgnoRoot); } pgnoRoot++; @@ -80081,7 +80562,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){ } rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PGNO(pgnoRoot); } if( rc!=SQLITE_OK ){ releasePage(pRoot); @@ -80171,14 +80652,14 @@ static int clearDatabasePage( assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno>btreePagecount(pBt) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pgno); } rc = getAndInitPage(pBt, pgno, &pPage, 0); if( rc ) return rc; if( (pBt->openFlags & BTREE_SINGLE)==0 && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1)) ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); goto cleardatabasepage_out; } hdr = pPage->hdrOffset; @@ -80282,7 +80763,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){ assert( p->inTrans==TRANS_WRITE ); assert( iTable>=2 ); if( iTable>btreePagecount(pBt) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(iTable); } rc = sqlite3BtreeClearTable(p, iTable, 0); @@ -80876,6 +81357,9 @@ static int checkTreePage( ** number of cells on the page. */ nCell = get2byte(&data[hdr+3]); assert( pPage->nCell==nCell ); + if( pPage->leaf || pPage->intKey==0 ){ + pCheck->nRow += nCell; + } /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page ** immediately follows the b-tree page header. 
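**
** A note on the error macros these hunks swap in (sketched here from
** their usage; the exact definitions live earlier in the amalgamation):
** SQLITE_CORRUPT_BKPT records only the source line at which corruption
** was detected, while the _PGNO/_PAGE variants also capture the database
** page involved, which makes fuzzer reports much easier to trace:
**
**     #define SQLITE_CORRUPT_BKPT       sqlite3CorruptError(__LINE__)
**     #define SQLITE_CORRUPT_PGNO(P)    sqlite3CorruptPgnoError(__LINE__,(P))
**     #define SQLITE_CORRUPT_PAGE(pPg)  SQLITE_CORRUPT_PGNO((pPg)->pgno)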
*/ @@ -80987,6 +81471,7 @@ static int checkTreePage( btreeHeapInsert(heap, (pc<<16)|(pc+size-1)); } } + assert( heap!=0 ); /* Add the freeblocks to the min-heap ** ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header @@ -81086,6 +81571,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( sqlite3 *db, /* Database connection that is running the check */ Btree *p, /* The btree to be checked */ Pgno *aRoot, /* An array of root pages numbers for individual trees */ + Mem *aCnt, /* Memory cells to write counts for each tree to */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ int *pnErr, /* OUT: Write number of errors seen to this variable */ @@ -81099,7 +81585,9 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( int bPartial = 0; /* True if not checking all btrees */ int bCkFreelist = 1; /* True to scan the freelist */ VVA_ONLY( int nRef ); + assert( nRoot>0 ); + assert( aCnt!=0 ); /* aRoot[0]==0 means this is a partial check */ if( aRoot[0]==0 ){ @@ -81172,15 +81660,18 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( testcase( pBt->db->flags & SQLITE_CellSizeCk ); pBt->db->flags &= ~(u64)SQLITE_CellSizeCk; for(i=0; (int)iautoVacuum && aRoot[i]>1 && !bPartial ){ - checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); - } + if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){ + checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); + } #endif - sCheck.v0 = aRoot[i]; - checkTreePage(&sCheck, aRoot[i], ¬Used, LARGEST_INT64); + sCheck.v0 = aRoot[i]; + checkTreePage(&sCheck, aRoot[i], ¬Used, LARGEST_INT64); + } + sqlite3MemSetArrayInt64(aCnt, i, sCheck.nRow); } pBt->db->flags = savedDbFlags; @@ -83235,6 +83726,13 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){ } } +/* +** Set the iIdx'th entry of array aMem[] to contain integer value val. +*/ +SQLITE_PRIVATE void sqlite3MemSetArrayInt64(sqlite3_value *aMem, int iIdx, i64 val){ + sqlite3VdbeMemSetInt64(&aMem[iIdx], val); +} + /* A no-op destructor */ SQLITE_PRIVATE void sqlite3NoopDestructor(void *p){ UNUSED_PARAMETER(p); } @@ -83923,14 +84421,20 @@ static int valueFromExpr( } /* Handle negative integers in a single step. This is needed in the - ** case when the value is -9223372036854775808. - */ - if( op==TK_UMINUS - && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){ - pExpr = pExpr->pLeft; - op = pExpr->op; - negInt = -1; - zNeg = "-"; + ** case when the value is -9223372036854775808. Except - do not do this + ** for hexadecimal literals. 
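**
** As a side note, the hex-literal test added below relies on the ASCII
** trick that masking out bit 0x20 upper-cases a letter, so a single
** comparison matches both "0x" and "0X" (self-contained model):
**
**     static int isHexPrefix(const char *z){
**       return z[0]=='0' && (z[1] & ~0x20)=='X';
**     }
**
** A hexadecimal literal never needs the single-step treatment; it is
** routed through the general path instead, where the
** sqlite3DecOrHexToI64() call below understands the 0x form.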
*/ + if( op==TK_UMINUS ){ + Expr *pLeft = pExpr->pLeft; + if( (pLeft->op==TK_INTEGER || pLeft->op==TK_FLOAT) ){ + if( ExprHasProperty(pLeft, EP_IntValue) + || pLeft->u.zToken[0]!='0' || (pLeft->u.zToken[1] & ~0x20)!='X' + ){ + pExpr = pLeft; + op = pExpr->op; + negInt = -1; + zNeg = "-"; + } + } } if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){ @@ -83939,12 +84443,26 @@ static int valueFromExpr( if( ExprHasProperty(pExpr, EP_IntValue) ){ sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt); }else{ - zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken); - if( zVal==0 ) goto no_mem; - sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); + i64 iVal; + if( op==TK_INTEGER && 0==sqlite3DecOrHexToI64(pExpr->u.zToken, &iVal) ){ + sqlite3VdbeMemSetInt64(pVal, iVal*negInt); + }else{ + zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken); + if( zVal==0 ) goto no_mem; + sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); + } } - if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){ - sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); + if( affinity==SQLITE_AFF_BLOB ){ + if( op==TK_FLOAT ){ + assert( pVal && pVal->z && pVal->flags==(MEM_Str|MEM_Term) ); + sqlite3AtoF(pVal->z, &pVal->u.r, pVal->n, SQLITE_UTF8); + pVal->flags = MEM_Real; + }else if( op==TK_INTEGER ){ + /* This case is required by -9223372036854775808 and other strings + ** that look like integers but cannot be handled by the + ** sqlite3DecOrHexToI64() call above. */ + sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); + } }else{ sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8); } @@ -84214,17 +84732,17 @@ SQLITE_PRIVATE int sqlite3Stat4Column( sqlite3_value **ppVal /* OUT: Extracted value */ ){ u32 t = 0; /* a column type code */ - int nHdr; /* Size of the header in the record */ - int iHdr; /* Next unread header byte */ - int iField; /* Next unread data byte */ - int szField = 0; /* Size of the current data field */ + u32 nHdr; /* Size of the header in the record */ + u32 iHdr; /* Next unread header byte */ + i64 iField; /* Next unread data byte */ + u32 szField = 0; /* Size of the current data field */ int i; /* Column index */ u8 *a = (u8*)pRec; /* Typecast byte array */ Mem *pMem = *ppVal; /* Write result into this Mem object */ assert( iCol>0 ); iHdr = getVarint32(a, nHdr); - if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT; + if( nHdr>(u32)nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT; iField = nHdr; for(i=0; i<=iCol; i++){ iHdr += getVarint32(&a[iHdr], t); @@ -85259,6 +85777,15 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ assert( aLabel!=0 ); /* True because of tag-20230419-1 */ pOp->p2 = aLabel[ADDR(pOp->p2)]; } + + /* OPFLG_JUMP opcodes never have P2==0, though OPFLG_JUMP0 opcodes + ** might */ + assert( pOp->p2>0 + || (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP0)!=0 ); + + /* Jumps never go off the end of the bytecode array */ + assert( pOp->p2nOp + || (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)==0 ); break; } } @@ -87666,7 +88193,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ /* Check for immediate foreign key violations. */ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ - sqlite3VdbeCheckFk(p, 0); + (void)sqlite3VdbeCheckFk(p, 0); } /* If the auto-commit flag is set and this is the only active writer @@ -88380,6 +88907,23 @@ static void serialGet( pMem->flags = IsNaN(x) ? 
MEM_Null : MEM_Real;
   }
 }
+static int serialGet7(
+  const unsigned char *buf,     /* Buffer to deserialize from */
+  Mem *pMem                     /* Memory cell to write value into */
+){
+  u64 x = FOUR_BYTE_UINT(buf);
+  u32 y = FOUR_BYTE_UINT(buf+4);
+  x = (x<<32) + y;
+  assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 );
+  swapMixedEndianFloat(x);
+  memcpy(&pMem->u.r, &x, sizeof(x));
+  if( IsNaN(x) ){
+    pMem->flags = MEM_Null;
+    return 1;
+  }
+  pMem->flags = MEM_Real;
+  return 0;
+}
 SQLITE_PRIVATE void sqlite3VdbeSerialGet(
   const unsigned char *buf,     /* Buffer to deserialize from */
   u32 serial_type,              /* Serial type to deserialize */
@@ -88819,17 +89363,15 @@ SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){
     return (x<r) ? -1 : (x>r);
   }else{
     i64 y;
-    double s;
     if( r<-9223372036854775808.0 ) return +1;
     if( r>=9223372036854775808.0 ) return -1;
     y = (i64)r;
     if( i<y ) return -1;
     if( i>y ) return +1;
-    s = (double)i;
-    testcase( doubleLt(s,r) );
-    testcase( doubleLt(r,s) );
-    testcase( doubleEq(r,s) );
-    return (s<r) ? -1 : (s>r);
+    testcase( doubleLt(((double)i),r) );
+    testcase( doubleLt(r,((double)i)) );
+    testcase( doubleEq(r,((double)i)) );
+    return (((double)i)<r) ? -1 : (((double)i)>r);
   }
 }
@@ -89059,7 +89601,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
       }else if( serial_type==0 ){
         rc = -1;
       }else if( serial_type==7 ){
-        sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
+        serialGet7(&aKey1[d1], &mem1);
         rc = -sqlite3IntFloatCompare(pRhs->u.i, mem1.u.r);
       }else{
         i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]);
@@ -89084,14 +89626,18 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
       }else if( serial_type==0 ){
         rc = -1;
       }else{
-        sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
         if( serial_type==7 ){
-          if( mem1.u.r<pRhs->u.r ){
+          if( serialGet7(&aKey1[d1], &mem1) ){
+            rc = -1;  /* mem1 is a NaN */
+          }else if( mem1.u.r<pRhs->u.r ){
             rc = -1;
           }else if( mem1.u.r>pRhs->u.r ){
             rc = +1;
+          }else{
+            assert( rc==0 );
           }
         }else{
+          sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
           rc = sqlite3IntFloatCompare(mem1.u.i, pRhs->u.r);
         }
       }
@@ -89161,7 +89707,14 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
     /* RHS is null */
     else{
       serial_type = aKey1[idx1];
-      rc = (serial_type!=0 && serial_type!=10);
+      if( serial_type==0
+       || serial_type==10
+       || (serial_type==7 && serialGet7(&aKey1[d1], &mem1)!=0)
+      ){
+        assert( rc==0 );
+      }else{
+        rc = 1;
+      }
     }

     if( rc!=0 ){
@@ -89621,7 +90174,8 @@ SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff
   assert( iVar>0 );
   if( v ){
     Mem *pMem = &v->aVar[iVar-1];
-    assert( (v->db->flags & SQLITE_EnableQPSG)==0 );
+    assert( (v->db->flags & SQLITE_EnableQPSG)==0
+         || (v->db->mDbFlags & DBFLAG_InternalFunc)!=0 );
     if( 0==(pMem->flags & MEM_Null) ){
       sqlite3_value *pRet = sqlite3ValueNew(v->db);
       if( pRet ){
@@ -89641,7 +90195,8 @@
 */
 SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){
   assert( iVar>0 );
-  assert( (v->db->flags & SQLITE_EnableQPSG)==0 );
+  assert( (v->db->flags & SQLITE_EnableQPSG)==0
+       || (v->db->mDbFlags & DBFLAG_InternalFunc)!=0 );
   if( iVar>=32 ){
     v->expmask |= 0x80000000;
   }else{
@@ -92226,7 +92781,6 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2(
   }
   if( flags & SQLITE_SCANSTAT_COMPLEX ){
     idx = iScan;
-    pScan = &p->aScan[idx];
   }else{
     /* If the COMPLEX flag is clear, then this function must ignore any
     ** ScanStatus structures with ScanStatus.addrLoop set to 0.
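**
** As an aside on the serialGet7() helper added above: serial type 7 is
** an 8-byte big-endian IEEE-754 double, and a NaN key is now treated as
** if it were NULL.  A self-contained model of the decode (not the
** amalgamation's exact code; needs <stdint.h>, <string.h>, <math.h>):
**
**     static int decodeSerial7(const unsigned char *a, double *pR){
**       uint64_t x = 0;
**       for(int i=0; i<8; i++) x = (x<<8) | a[i];
**       memcpy(pR, &x, 8);
**       return isnan(*pR);   /* non-zero => compare the key as NULL */
**     }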
*/ @@ -92239,6 +92793,8 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( } } if( idx>=p->nScan ) return 1; + assert( pScan==0 || pScan==&p->aScan[idx] ); + pScan = &p->aScan[idx]; switch( iScanStatusOp ){ case SQLITE_SCANSTAT_NLOOP: { @@ -93687,7 +94243,7 @@ case OP_Return: { /* in1 */ ** ** See also: EndCoroutine */ -case OP_InitCoroutine: { /* jump */ +case OP_InitCoroutine: { /* jump0 */ assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) ); assert( pOp->p2>=0 && pOp->p2nOp ); assert( pOp->p3>=0 && pOp->p3nOp ); @@ -93710,7 +94266,9 @@ case OP_InitCoroutine: { /* jump */ ** ** The instruction at the address in register P1 is a Yield. ** Jump to the P2 parameter of that Yield. -** After the jump, register P1 becomes undefined. +** After the jump, the value register P1 is left with a value +** such that subsequent OP_Yields go back to the this same +** OP_EndCoroutine instruction. ** ** See also: InitCoroutine */ @@ -93722,8 +94280,8 @@ case OP_EndCoroutine: { /* in1 */ pCaller = &aOp[pIn1->u.i]; assert( pCaller->opcode==OP_Yield ); assert( pCaller->p2>=0 && pCaller->p2nOp ); + pIn1->u.i = (int)(pOp - p->aOp) - 1; pOp = &aOp[pCaller->p2 - 1]; - pIn1->flags = MEM_Undefined; break; } @@ -93740,7 +94298,7 @@ case OP_EndCoroutine: { /* in1 */ ** ** See also: InitCoroutine */ -case OP_Yield: { /* in1, jump */ +case OP_Yield: { /* in1, jump0 */ int pcDest; pIn1 = &aMem[pOp->p1]; assert( VdbeMemDynamic(pIn1)==0 ); @@ -94070,19 +94628,15 @@ case OP_Blob: { /* out2 */ break; } -/* Opcode: Variable P1 P2 * P4 * -** Synopsis: r[P2]=parameter(P1,P4) +/* Opcode: Variable P1 P2 * * * +** Synopsis: r[P2]=parameter(P1) ** ** Transfer the values of bound parameter P1 into register P2 -** -** If the parameter is named, then its name appears in P4. -** The P4 value is used by sqlite3_bind_parameter_name(). */ case OP_Variable: { /* out2 */ Mem *pVar; /* Value being transferred */ assert( pOp->p1>0 && pOp->p1<=p->nVar ); - assert( pOp->p4.z==0 || pOp->p4.z==sqlite3VListNumToName(p->pVList,pOp->p1) ); pVar = &p->aVar[pOp->p1 - 1]; if( sqlite3VdbeMemTooBig(pVar) ){ goto too_big; @@ -94603,7 +95157,7 @@ case OP_AddImm: { /* in1 */ ** without data loss, then jump immediately to P2, or if P2==0 ** raise an SQLITE_MISMATCH exception. 
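**
** The many "jump" -> "jump0" comment edits in this region are not
** cosmetic: these annotations feed the opcode-property tables, and
** "jump0" marks a jump opcode whose P2 operand is allowed to be zero.
** That is exactly what the new resolveP2Values() assertions verify,
** roughly:
**
**     assert( pOp->p2>0
**          || (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP0)!=0 );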
*/ -case OP_MustBeInt: { /* jump, in1 */ +case OP_MustBeInt: { /* jump0, in1 */ pIn1 = &aMem[pOp->p1]; if( (pIn1->flags & MEM_Int)==0 ){ applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding); @@ -94644,7 +95198,7 @@ case OP_RealAffinity: { /* in1 */ } #endif -#ifndef SQLITE_OMIT_CAST +#if !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_ANALYZE) /* Opcode: Cast P1 P2 * * * ** Synopsis: affinity(r[P1]) ** @@ -94859,7 +95413,9 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ } } }else if( affinity==SQLITE_AFF_TEXT && ((flags1 | flags3) & MEM_Str)!=0 ){ - if( (flags1 & MEM_Str)==0 && (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ + if( (flags1 & MEM_Str)!=0 ){ + pIn1->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal); + }else if( (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ testcase( pIn1->flags & MEM_Int ); testcase( pIn1->flags & MEM_Real ); testcase( pIn1->flags & MEM_IntReal ); @@ -94868,7 +95424,9 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask); if( NEVER(pIn1==pIn3) ) flags3 = flags1 | MEM_Str; } - if( (flags3 & MEM_Str)==0 && (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ + if( (flags3 & MEM_Str)!=0 ){ + pIn3->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal); + }else if( (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ testcase( pIn3->flags & MEM_Int ); testcase( pIn3->flags & MEM_Real ); testcase( pIn3->flags & MEM_IntReal ); @@ -96212,11 +96770,16 @@ case OP_MakeRecord: { switch( len ){ default: zPayload[7] = (u8)(v&0xff); v >>= 8; zPayload[6] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 6: zPayload[5] = (u8)(v&0xff); v >>= 8; zPayload[4] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 4: zPayload[3] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 3: zPayload[2] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 2: zPayload[1] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 1: zPayload[0] = (u8)(v&0xff); } zPayload += len; @@ -97135,7 +97698,8 @@ case OP_SequenceTest: { ** is the only cursor opcode that works with a pseudo-table. ** ** P3 is the number of fields in the records that will be stored by -** the pseudo-table. +** the pseudo-table. If P2 is 0 or negative then the pseudo-cursor +** will return NULL for every column. */ case OP_OpenPseudo: { VdbeCursor *pCx; @@ -97278,10 +97842,10 @@ case OP_ColumnsUsed: { ** ** See also: Found, NotFound, SeekGt, SeekGe, SeekLt */ -case OP_SeekLT: /* jump, in3, group, ncycle */ -case OP_SeekLE: /* jump, in3, group, ncycle */ -case OP_SeekGE: /* jump, in3, group, ncycle */ -case OP_SeekGT: { /* jump, in3, group, ncycle */ +case OP_SeekLT: /* jump0, in3, group, ncycle */ +case OP_SeekLE: /* jump0, in3, group, ncycle */ +case OP_SeekGE: /* jump0, in3, group, ncycle */ +case OP_SeekGT: { /* jump0, in3, group, ncycle */ int res; /* Comparison result */ int oc; /* Opcode */ VdbeCursor *pC; /* The cursor to seek */ @@ -97948,7 +98512,7 @@ case OP_Found: { /* jump, in3, ncycle */ ** ** See also: Found, NotFound, NoConflict, SeekRowid */ -case OP_SeekRowid: { /* jump, in3, ncycle */ +case OP_SeekRowid: { /* jump0, in3, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -98707,7 +99271,7 @@ case OP_NullRow: { ** configured to use Prev, not Next. 
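**
** An aside on the OP_MakeRecord hunk above: that annotated switch
** serializes a 1..8 byte integer big-endian by falling through the
** cases.  A stand-alone equivalent (len assumed to be in 1..8):
**
**     static void putBigEndian(unsigned char *z, unsigned long long v,
**                              int len){
**       int i;
**       for(i=len-1; i>=0; i--){ z[i] = (unsigned char)(v&0xff); v >>= 8; }
**     }
**
** The "deliberate_fall_through" markers only silence warnings such as
** gcc's -Wimplicit-fallthrough; they do not change behavior.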
*/ case OP_SeekEnd: /* ncycle */ -case OP_Last: { /* jump, ncycle */ +case OP_Last: { /* jump0, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -98741,28 +99305,38 @@ case OP_Last: { /* jump, ncycle */ break; } -/* Opcode: IfSmaller P1 P2 P3 * * +/* Opcode: IfSizeBetween P1 P2 P3 P4 * ** -** Estimate the number of rows in the table P1. Jump to P2 if that -** estimate is less than approximately 2**(0.1*P3). +** Let N be the approximate number of rows in the table or index +** with cursor P1 and let X be 10*log2(N) if N is positive or -1 +** if N is zero. +** +** Jump to P2 if X is in between P3 and P4, inclusive. */ -case OP_IfSmaller: { /* jump */ +case OP_IfSizeBetween: { /* jump */ VdbeCursor *pC; BtCursor *pCrsr; int res; i64 sz; assert( pOp->p1>=0 && pOp->p1nCursor ); + assert( pOp->p4type==P4_INT32 ); + assert( pOp->p3>=-1 && pOp->p3<=640*2 ); + assert( pOp->p4.i>=-1 && pOp->p4.i<=640*2 ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); pCrsr = pC->uc.pCursor; assert( pCrsr ); rc = sqlite3BtreeFirst(pCrsr, &res); if( rc ) goto abort_due_to_error; - if( res==0 ){ + if( res!=0 ){ + sz = -1; /* -Infinity encoding */ + }else{ sz = sqlite3BtreeRowCountEst(pCrsr); - if( ALWAYS(sz>=0) && sqlite3LogEst((u64)sz)p3 ) res = 1; + assert( sz>0 ); + sz = sqlite3LogEst((u64)sz); } + res = sz>=pOp->p3 && sz<=pOp->p4.i; VdbeBranchTaken(res!=0,2); if( res ) goto jump_to_p2; break; @@ -98815,7 +99389,7 @@ case OP_Sort: { /* jump ncycle */ ** from the beginning toward the end. In other words, the cursor is ** configured to use Next, not Prev. */ -case OP_Rewind: { /* jump, ncycle */ +case OP_Rewind: { /* jump0, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -99462,11 +100036,18 @@ case OP_CreateBtree: { /* out2 */ break; } -/* Opcode: SqlExec * * * P4 * +/* Opcode: SqlExec P1 P2 * P4 * ** ** Run the SQL statement or statements specified in the P4 string. -** Disable Auth and Trace callbacks while those statements are running if -** P1 is true. +** +** The P1 parameter is a bitmask of options: +** +** 0x0001 Disable Auth and Trace callbacks while the statements +** in P4 are running. +** +** 0x0002 Set db->nAnalysisLimit to P2 while the statements in +** P4 are running. +** */ case OP_SqlExec: { char *zErr; @@ -99474,6 +100055,7 @@ case OP_SqlExec: { sqlite3_xauth xAuth; #endif u8 mTrace; + int savedAnalysisLimit; sqlite3VdbeIncrWriteCounter(p, 0); db->nSqlExec++; @@ -99482,18 +100064,23 @@ case OP_SqlExec: { xAuth = db->xAuth; #endif mTrace = db->mTrace; - if( pOp->p1 ){ + savedAnalysisLimit = db->nAnalysisLimit; + if( pOp->p1 & 0x0001 ){ #ifndef SQLITE_OMIT_AUTHORIZATION db->xAuth = 0; #endif db->mTrace = 0; } + if( pOp->p1 & 0x0002 ){ + db->nAnalysisLimit = pOp->p2; + } rc = sqlite3_exec(db, pOp->p4.z, 0, 0, &zErr); db->nSqlExec--; #ifndef SQLITE_OMIT_AUTHORIZATION db->xAuth = xAuth; #endif db->mTrace = mTrace; + db->nAnalysisLimit = savedAnalysisLimit; if( zErr || rc ){ sqlite3VdbeError(p, "%s", zErr); sqlite3_free(zErr); @@ -99645,11 +100232,11 @@ case OP_DropTrigger: { /* Opcode: IntegrityCk P1 P2 P3 P4 P5 ** ** Do an analysis of the currently open database. Store in -** register P1 the text of an error message describing any problems. -** If no problems are found, store a NULL in register P1. +** register (P1+1) the text of an error message describing any problems. +** If no problems are found, store a NULL in register (P1+1). ** -** The register P3 contains one less than the maximum number of allowed errors. -** At most reg(P3) errors will be reported. 
+** The register (P1) contains one less than the maximum number of allowed +** errors. At most reg(P1) errors will be reported. ** In other words, the analysis stops as soon as reg(P1) errors are ** seen. Reg(P1) is updated with the number of errors remaining. ** @@ -99669,19 +100256,21 @@ case OP_IntegrityCk: { Mem *pnErr; /* Register keeping track of errors remaining */ assert( p->bIsReader ); + assert( pOp->p4type==P4_INTARRAY ); nRoot = pOp->p2; aRoot = pOp->p4.ai; assert( nRoot>0 ); + assert( aRoot!=0 ); assert( aRoot[0]==(Pgno)nRoot ); - assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); - pnErr = &aMem[pOp->p3]; + assert( pOp->p1>0 && (pOp->p1+1)<=(p->nMem+1 - p->nCursor) ); + pnErr = &aMem[pOp->p1]; assert( (pnErr->flags & MEM_Int)!=0 ); assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 ); - pIn1 = &aMem[pOp->p1]; + pIn1 = &aMem[pOp->p1+1]; assert( pOp->p5nDb ); assert( DbMaskTest(p->btreeMask, pOp->p5) ); - rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot, - (int)pnErr->u.i+1, &nErr, &z); + rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], + &aMem[pOp->p3], nRoot, (int)pnErr->u.i+1, &nErr, &z); sqlite3VdbeMemSetNull(pIn1); if( nErr==0 ){ assert( z==0 ); @@ -99808,7 +100397,9 @@ case OP_RowSetTest: { /* jump, in1, in3 */ ** P1 contains the address of the memory cell that contains the first memory ** cell in an array of values used as arguments to the sub-program. P2 ** contains the address to jump to if the sub-program throws an IGNORE -** exception using the RAISE() function. Register P3 contains the address +** exception using the RAISE() function. P2 might be zero, if there is +** no possibility that an IGNORE exception will be raised. +** Register P3 contains the address ** of a memory cell in this (the parent) VM that is used to allocate the ** memory required by the sub-vdbe at runtime. ** @@ -99816,7 +100407,7 @@ case OP_RowSetTest: { /* jump, in1, in3 */ ** ** If P5 is non-zero, then recursive program invocation is enabled. */ -case OP_Program: { /* jump */ +case OP_Program: { /* jump0 */ int nMem; /* Number of memory registers for sub-program */ int nByte; /* Bytes of runtime space required for sub-program */ Mem *pRt; /* Register to allocate runtime space */ @@ -101365,7 +101956,7 @@ case OP_Filter: { /* jump */ ** error is encountered. 
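**
** An aside on OP_IfSizeBetween, added earlier in this section: P3 and
** P4 are LogEst values, i.e. 10*log2(N) rounded, with -1 standing in
** for "minus infinity" (an empty btree).  A quick model of the
** encoding (illustration only; sqlite3LogEst() rounds differently):
**
**     #include <math.h>
**     static int logEst(unsigned long long n){
**       return n ? (int)lround(10.0*log2((double)n)) : -1;
**     }
**
** So N=1 -> 0, N=32 -> 50, N=1000 -> ~100, and a window of P3=0,
** P4=100 means "between one row and roughly a thousand rows".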
*/ case OP_Trace: -case OP_Init: { /* jump */ +case OP_Init: { /* jump0 */ int i; #ifndef SQLITE_OMIT_TRACE char *zTrace; @@ -105266,10 +105857,10 @@ static int bytecodevtabColumn( #ifdef SQLITE_ENABLE_STMT_SCANSTATUS case 9: /* nexec */ - sqlite3_result_int(ctx, pOp->nExec); + sqlite3_result_int64(ctx, pOp->nExec); break; case 10: /* ncycle */ - sqlite3_result_int(ctx, pOp->nCycle); + sqlite3_result_int64(ctx, pOp->nCycle); break; #else case 9: /* nexec */ @@ -106213,6 +106804,8 @@ static void resolveAlias( assert( iCol>=0 && iColnExpr ); pOrig = pEList->a[iCol].pExpr; assert( pOrig!=0 ); + assert( !ExprHasProperty(pExpr, EP_Reduced|EP_TokenOnly) ); + if( pExpr->pAggInfo ) return; db = pParse->db; pDup = sqlite3ExprDup(db, pOrig, 0); if( db->mallocFailed ){ @@ -106360,7 +106953,7 @@ static void extendFJMatch( static SQLITE_NOINLINE int isValidSchemaTableName( const char *zTab, /* Name as it appears in the SQL */ Table *pTab, /* The schema table we are trying to match */ - Schema *pSchema /* non-NULL if a database qualifier is present */ + const char *zDb /* non-NULL if a database qualifier is present */ ){ const char *zLegacy; assert( pTab!=0 ); @@ -106371,7 +106964,7 @@ static SQLITE_NOINLINE int isValidSchemaTableName( if( sqlite3StrICmp(zTab+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){ return 1; } - if( pSchema==0 ) return 0; + if( zDb==0 ) return 0; if( sqlite3StrICmp(zTab+7, &LEGACY_SCHEMA_TABLE[7])==0 ) return 1; if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1; }else{ @@ -106411,7 +107004,7 @@ static int lookupName( Parse *pParse, /* The parsing context */ const char *zDb, /* Name of the database containing table, or NULL */ const char *zTab, /* Name of table containing column, or NULL */ - const char *zCol, /* Name of the column. */ + const Expr *pRight, /* Name of the column. */ NameContext *pNC, /* The name context used to resolve the name */ Expr *pExpr /* Make this EXPR node point to the selected column */ ){ @@ -106428,6 +107021,7 @@ static int lookupName( Table *pTab = 0; /* Table holding the row */ Column *pCol; /* A column of pTab */ ExprList *pFJMatch = 0; /* Matches for FULL JOIN .. USING */ + const char *zCol = pRight->u.zToken; assert( pNC ); /* the name context cannot be NULL. */ assert( zCol ); /* The Z in X.Y.Z cannot be NULL */ @@ -106553,7 +107147,7 @@ static int lookupName( } }else if( sqlite3StrICmp(zTab, pTab->zName)!=0 ){ if( pTab->tnum!=1 ) continue; - if( !isValidSchemaTableName(zTab, pTab, pSchema) ) continue; + if( !isValidSchemaTableName(zTab, pTab, zDb) ) continue; } assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT && pItem->zAlias ){ @@ -106600,8 +107194,37 @@ static int lookupName( } } if( 0==cnt && VisibleRowid(pTab) ){ + /* pTab is a potential ROWID match. Keep track of it and match + ** the ROWID later if that seems appropriate. (Search for "cntTab" + ** to find related code.) Only allow a ROWID match if there is + ** a single ROWID match candidate. + */ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + /* In SQLITE_ALLOW_ROWID_IN_VIEW mode, allow a ROWID match + ** if there is a single VIEW candidate or if there is a single + ** non-VIEW candidate plus multiple VIEW candidates. In other + ** words non-VIEW candidate terms take precedence over VIEWs. 
+ */ + if( cntTab==0 + || (cntTab==1 + && ALWAYS(pMatch!=0) + && ALWAYS(pMatch->pTab!=0) + && (pMatch->pTab->tabFlags & TF_Ephemeral)!=0 + && (pTab->tabFlags & TF_Ephemeral)==0) + ){ + cntTab = 1; + pMatch = pItem; + }else{ + cntTab++; + } +#else + /* The (much more common) non-SQLITE_ALLOW_ROWID_IN_VIEW case is + ** simpler since we require exactly one candidate, which will + ** always be a non-VIEW + */ cntTab++; pMatch = pItem; +#endif } } if( pMatch ){ @@ -106630,7 +107253,8 @@ static int lookupName( if( pParse->bReturning ){ if( (pNC->ncFlags & NC_UBaseReg)!=0 && ALWAYS(zTab==0 - || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0 + || isValidSchemaTableName(zTab, pParse->pTriggerTab, 0)) ){ pExpr->iTable = op!=TK_DELETE; pTab = pParse->pTriggerTab; @@ -106727,13 +107351,18 @@ static int lookupName( ** Perhaps the name is a reference to the ROWID */ if( cnt==0 - && cntTab==1 + && cntTab>=1 && pMatch && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0 && sqlite3IsRowid(zCol) && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom) ){ - cnt = 1; + cnt = cntTab; +#if SQLITE_ALLOW_ROWID_IN_VIEW+0==2 + if( pMatch->pTab!=0 && IsView(pMatch->pTab) ){ + eNewExprOp = TK_NULL; + } +#endif if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1; pExpr->affExpr = SQLITE_AFF_INTEGER; } @@ -106887,6 +107516,10 @@ static int lookupName( sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol); }else if( zTab ){ sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol); + }else if( cnt==0 && ExprHasProperty(pRight,EP_DblQuoted) ){ + sqlite3ErrorMsg(pParse, "%s: \"%s\" - should this be a" + " string literal in single-quotes?", + zErr, zCol); }else{ sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); } @@ -106920,8 +107553,12 @@ static int lookupName( ** If a generated column is referenced, set bits for every column ** of the table. */ - if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){ - pMatch->colUsed |= sqlite3ExprColUsed(pExpr); + if( pMatch ){ + if( pExpr->iColumn>=0 ){ + pMatch->colUsed |= sqlite3ExprColUsed(pExpr); + }else{ + pMatch->fg.rowidUsed = 1; + } } pExpr->op = eNewExprOp; @@ -107098,6 +107735,19 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ ** resolved. This prevents "column" from being counted as having been ** referenced, which might prevent a SELECT from being erroneously ** marked as correlated. + ** + ** 2024-03-28: Beware of aggregates. A bare column of aggregated table + ** can still evaluate to NULL even though it is marked as NOT NULL. + ** Example: + ** + ** CREATE TABLE t1(a INT NOT NULL); + ** SELECT a, a IS NULL, a IS NOT NULL, count(*) FROM t1; + ** + ** The "a IS NULL" and "a IS NOT NULL" expressions cannot be optimized + ** here because at the time this case is hit, we do not yet know whether + ** or not t1 is being aggregated. We have to assume the worst and omit + ** the optimization. The only time it is safe to apply this optimization + ** is within the WHERE clause. 
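**
** Condensed, the gate implemented below is: walk every enclosing name
** context and rewrite only if all of them are WHERE clauses (sketch;
** NC_Where is set around WHERE-clause resolution later in this diff):
**
**     static int safeToReduce(const NameContext *p){
**       for(; p; p=p->pNext){
**         if( (p->ncFlags & NC_Where)==0 ) return 0;
**       }
**       return 1;
**     }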
*/ case TK_NOTNULL: case TK_ISNULL: { @@ -107108,19 +107758,36 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ anRef[i] = p->nRef; } sqlite3WalkExpr(pWalker, pExpr->pLeft); - if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){ - testcase( ExprHasProperty(pExpr, EP_OuterON) ); - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - pExpr->u.iValue = (pExpr->op==TK_NOTNULL); - pExpr->flags |= EP_IntValue; - pExpr->op = TK_INTEGER; + if( IN_RENAME_OBJECT ) return WRC_Prune; + if( sqlite3ExprCanBeNull(pExpr->pLeft) ){ + /* The expression can be NULL. So the optimization does not apply */ + return WRC_Prune; + } - for(i=0, p=pNC; p && ipNext, i++){ - p->nRef = anRef[i]; + for(i=0, p=pNC; p; p=p->pNext, i++){ + if( (p->ncFlags & NC_Where)==0 ){ + return WRC_Prune; /* Not in a WHERE clause. Unsafe to optimize. */ } - sqlite3ExprDelete(pParse->db, pExpr->pLeft); - pExpr->pLeft = 0; } + testcase( ExprHasProperty(pExpr, EP_OuterON) ); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x80000 ){ + sqlite3DebugPrintf( + "NOT NULL strength reduction converts the following to %d:\n", + pExpr->op==TK_NOTNULL + ); + sqlite3ShowExpr(pExpr); + } +#endif /* TREETRACE_ENABLED */ + pExpr->u.iValue = (pExpr->op==TK_NOTNULL); + pExpr->flags |= EP_IntValue; + pExpr->op = TK_INTEGER; + for(i=0, p=pNC; p && ipNext, i++){ + p->nRef = anRef[i]; + } + sqlite3ExprDelete(pParse->db, pExpr->pLeft); + pExpr->pLeft = 0; return WRC_Prune; } @@ -107134,7 +107801,6 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ */ case TK_ID: case TK_DOT: { - const char *zColumn; const char *zTable; const char *zDb; Expr *pRight; @@ -107143,7 +107809,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ zDb = 0; zTable = 0; assert( !ExprHasProperty(pExpr, EP_IntValue) ); - zColumn = pExpr->u.zToken; + pRight = pExpr; }else{ Expr *pLeft = pExpr->pLeft; testcase( pNC->ncFlags & NC_IdxExpr ); @@ -107162,14 +107828,13 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ } assert( ExprUseUToken(pLeft) && ExprUseUToken(pRight) ); zTable = pLeft->u.zToken; - zColumn = pRight->u.zToken; assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight); sqlite3RenameTokenRemap(pParse, (void*)&pExpr->y.pTab, (void*)pLeft); } } - return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr); + return lookupName(pParse, zDb, zTable, pRight, pNC, pExpr); } /* Resolve function names @@ -107345,11 +108010,9 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #endif } } -#ifndef SQLITE_OMIT_WINDOWFUNC - else if( ExprHasProperty(pExpr, EP_WinFunc) ){ + else if( ExprHasProperty(pExpr, EP_WinFunc) || pExpr->pLeft ){ is_agg = 1; } -#endif sqlite3WalkExprList(pWalker, pList); if( is_agg ){ if( pExpr->pLeft ){ @@ -107419,6 +108082,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ testcase( pNC->ncFlags & NC_PartIdx ); testcase( pNC->ncFlags & NC_IdxExpr ); testcase( pNC->ncFlags & NC_GenCol ); + assert( pExpr->x.pSelect ); if( pNC->ncFlags & NC_SelfRef ){ notValidImpl(pParse, pNC, "subqueries", pExpr, pExpr); }else{ @@ -107427,6 +108091,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); + pExpr->x.pSelect->selFlags |= SF_Correlated; } pNC->ncFlags |= NC_Subquery; } @@ -107952,6 +108617,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ if( pOuterNC ) pOuterNC->nNestedSelect++; for(i=0; 
ipSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; + assert( pItem->zName!=0 || pItem->pSelect!=0 );/* Test of tag-20240424-1*/ if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ int nRef = pOuterNC ? pOuterNC->nRef : 0; const char *zSavedContext = pParse->zAuthContext; @@ -108020,7 +108686,9 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ } if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; } + sNC.ncFlags |= NC_Where; if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort; + sNC.ncFlags &= ~NC_Where; /* Resolve names in table-valued-function arguments */ for(i=0; ipSrc->nSrc; i++){ @@ -108211,6 +108879,9 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames( ** Resolve all names for all expression in an expression list. This is ** just like sqlite3ResolveExprNames() except that it works for an expression ** list rather than a single expression. +** +** The return value is SQLITE_OK (0) for success or SQLITE_ERROR (1) for a +** failure. */ SQLITE_PRIVATE int sqlite3ResolveExprListNames( NameContext *pNC, /* Namespace to resolve expressions in. */ @@ -108219,7 +108890,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( int i; int savedHasAgg = 0; Walker w; - if( pList==0 ) return WRC_Continue; + if( pList==0 ) return SQLITE_OK; w.pParse = pNC->pParse; w.xExprCallback = resolveExprStep; w.xSelectCallback = resolveSelectStep; @@ -108233,7 +108904,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight += pExpr->nHeight; if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){ - return WRC_Abort; + return SQLITE_ERROR; } #endif sqlite3WalkExprNN(&w, pExpr); @@ -108250,10 +108921,10 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( (NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); } - if( w.pParse->nErr>0 ) return WRC_Abort; + if( w.pParse->nErr>0 ) return SQLITE_ERROR; } pNC->ncFlags |= savedHasAgg; - return WRC_Continue; + return SQLITE_OK; } /* @@ -108559,9 +109230,10 @@ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollateAndLikely(Expr *pExpr){ assert( pExpr->x.pList->nExpr>0 ); assert( pExpr->op==TK_FUNCTION ); pExpr = pExpr->x.pList->a[0].pExpr; - }else{ - assert( pExpr->op==TK_COLLATE ); + }else if( pExpr->op==TK_COLLATE ){ pExpr = pExpr->pLeft; + }else{ + break; } } return pExpr; @@ -109255,11 +109927,12 @@ SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr *pExpr, int iOfst){ ** appear to be quoted. If the quotes were of the form "..." (double-quotes) ** then the EP_DblQuoted flag is set on the expression node. ** -** Special case: If op==TK_INTEGER and pToken points to a string that -** can be translated into a 32-bit integer, then the token is not -** stored in u.zToken. Instead, the integer values is written -** into u.iValue and the EP_IntValue flag is set. No extra storage +** Special case (tag-20240227-a): If op==TK_INTEGER and pToken points to +** a string that can be translated into a 32-bit integer, then the token is +** not stored in u.zToken. Instead, the integer values is written +** into u.iValue and the EP_IntValue flag is set. No extra storage ** is allocated to hold the integer text and the dequote flag is ignored. +** See also tag-20240227-b. 
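**
** Concretely, the tag-20240227-a branch below behaves like this
** (illustrative; iValue is the local used in that code):
**
**     if( op==TK_INTEGER && sqlite3GetInt32(pToken->z, &iValue) ){
**       /* "123": stored inline in u.iValue, EP_IntValue set, nExtra=0 */
**     }else{
**       /* "9999999999": kept as text, so nExtra = pToken->n+1 */
**     }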
*/ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( sqlite3 *db, /* Handle for sqlite3DbMallocRawNN() */ @@ -109275,7 +109948,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( if( pToken ){ if( op!=TK_INTEGER || pToken->z==0 || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - nExtra = pToken->n+1; + nExtra = pToken->n+1; /* tag-20240227-a */ assert( iValue>=0 ); } } @@ -109707,6 +110380,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ assert( p!=0 ); assert( db!=0 ); +exprDeleteRestart: assert( !ExprUseUValue(p) || p->u.iValue>=0 ); assert( !ExprUseYWin(p) || !ExprUseYSub(p) ); assert( !ExprUseYWin(p) || p->y.pWin!=0 || db->mallocFailed ); @@ -109722,7 +110396,6 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ if( !ExprHasProperty(p, (EP_TokenOnly|EP_Leaf)) ){ /* The Expr.x union is never used at the same time as Expr.pRight */ assert( (ExprUseXList(p) && p->x.pList==0) || p->pRight==0 ); - if( p->pLeft && p->op!=TK_SELECT_COLUMN ) sqlite3ExprDeleteNN(db, p->pLeft); if( p->pRight ){ assert( !ExprHasProperty(p, EP_WinFunc) ); sqlite3ExprDeleteNN(db, p->pRight); @@ -109737,6 +110410,19 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ } #endif } + if( p->pLeft && p->op!=TK_SELECT_COLUMN ){ + Expr *pLeft = p->pLeft; + if( !ExprHasProperty(p, EP_Static) + && !ExprHasProperty(pLeft, EP_Static) + ){ + /* Avoid unnecessary recursion on unary operators */ + sqlite3DbNNFreeNN(db, p); + p = pLeft; + goto exprDeleteRestart; + }else{ + sqlite3ExprDeleteNN(db, pLeft); + } + } } if( !ExprHasProperty(p, EP_Static) ){ sqlite3DbNNFreeNN(db, p); @@ -109769,11 +110455,11 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){ ** ** The pExpr might be deleted immediately on an OOM error. ** -** The deferred delete is (currently) implemented by adding the -** pExpr to the pParse->pConstExpr list with a register number of 0. +** Return 0 if the delete was successfully deferred. Return non-zero +** if the delete happened immediately because of an OOM. */ -SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ - sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); +SQLITE_PRIVATE int sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ + return 0==sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); } /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the @@ -110209,17 +110895,19 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int fla pNewItem->iCursor = pOldItem->iCursor; pNewItem->addrFillSub = pOldItem->addrFillSub; pNewItem->regReturn = pOldItem->regReturn; + pNewItem->regResult = pOldItem->regResult; if( pNewItem->fg.isIndexedBy ){ pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy); + }else if( pNewItem->fg.isTabFunc ){ + pNewItem->u1.pFuncArg = + sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); + }else{ + pNewItem->u1.nRow = pOldItem->u1.nRow; } pNewItem->u2 = pOldItem->u2; if( pNewItem->fg.isCte ){ pNewItem->u2.pCteUse->nUse++; } - if( pNewItem->fg.isTabFunc ){ - pNewItem->u1.pFuncArg = - sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); - } pTab = pNewItem->pTab = pOldItem->pTab; if( pTab ){ pTab->nTabRef++; @@ -110685,6 +111373,54 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){ return pExpr; } +/* +** pExpr is a TK_FUNCTION node. Try to determine whether or not the +** function is a constant function. 
A function is constant if all of +** the following are true: +** +** (1) It is a scalar function (not an aggregate or window function) +** (2) It has either the SQLITE_FUNC_CONSTANT or SQLITE_FUNC_SLOCHNG +** property. +** (3) All of its arguments are constants +** +** This routine sets pWalker->eCode to 0 if pExpr is not a constant. +** It makes no changes to pWalker->eCode if pExpr is constant. In +** every case, it returns WRC_Abort. +** +** Called as a service subroutine from exprNodeIsConstant(). +*/ +static SQLITE_NOINLINE int exprNodeIsConstantFunction( + Walker *pWalker, + Expr *pExpr +){ + int n; /* Number of arguments */ + ExprList *pList; /* List of arguments */ + FuncDef *pDef; /* The function */ + sqlite3 *db; /* The database */ + + assert( pExpr->op==TK_FUNCTION ); + if( ExprHasProperty(pExpr, EP_TokenOnly) + || (pList = pExpr->x.pList)==0 + ){; + n = 0; + }else{ + n = pList->nExpr; + sqlite3WalkExprList(pWalker, pList); + if( pWalker->eCode==0 ) return WRC_Abort; + } + db = pWalker->pParse->db; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 + || pDef->xFinalize!=0 + || (pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 + || ExprHasProperty(pExpr, EP_WinFunc) + ){ + pWalker->eCode = 0; + return WRC_Abort; + } + return WRC_Prune; +} + /* ** These routines are Walker callbacks used to check expressions to @@ -110713,6 +111449,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){ ** malformed schema error. */ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ + assert( pWalker->eCode>0 ); /* If pWalker->eCode is 2 then any term of the expression that comes from ** the ON or USING clauses of an outer join disqualifies the expression @@ -110732,6 +111469,8 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ ){ if( pWalker->eCode==5 ) ExprSetProperty(pExpr, EP_FromDDL); return WRC_Continue; + }else if( pWalker->pParse ){ + return exprNodeIsConstantFunction(pWalker, pExpr); }else{ pWalker->eCode = 0; return WRC_Abort; @@ -110760,9 +111499,11 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ case TK_IF_NULL_ROW: case TK_REGISTER: case TK_DOT: + case TK_RAISE: testcase( pExpr->op==TK_REGISTER ); testcase( pExpr->op==TK_IF_NULL_ROW ); testcase( pExpr->op==TK_DOT ); + testcase( pExpr->op==TK_RAISE ); pWalker->eCode = 0; return WRC_Abort; case TK_VARIABLE: @@ -110784,15 +111525,15 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ return WRC_Continue; } } -static int exprIsConst(Expr *p, int initFlag, int iCur){ +static int exprIsConst(Parse *pParse, Expr *p, int initFlag){ Walker w; w.eCode = initFlag; + w.pParse = pParse; w.xExprCallback = exprNodeIsConstant; w.xSelectCallback = sqlite3SelectWalkFail; #ifdef SQLITE_DEBUG w.xSelectCallback2 = sqlite3SelectWalkAssert2; #endif - w.u.iCur = iCur; sqlite3WalkExpr(&w, p); return w.eCode; } @@ -110804,9 +111545,15 @@ static int exprIsConst(Expr *p, int initFlag, int iCur){ ** For the purposes of this function, a double-quoted string (ex: "abc") ** is considered a variable but a single-quoted string (ex: 'abc') is ** a constant. +** +** The pParse parameter may be NULL. But if it is NULL, there is no way +** to determine if function calls are constant or not, and hence all +** function calls will be considered to be non-constant. If pParse is +** not NULL, then a function call might be constant, depending on the +** function and on its parameters. 
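**
** The three rules above collapse into a single predicate, condensed
** from exprNodeIsConstantFunction() earlier in this section:
**
**     pDef!=0
**      && pDef->xFinalize==0                             /* rule (1) */
**      && (pDef->funcFlags &
**          (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))   /* rule (2) */
**      && allArgumentsAreConstant                        /* rule (3) */
**
** So with a Parse context available, abs(-9) now counts as constant,
** while random() (not SQLITE_FUNC_CONSTANT) and count(x) (an
** aggregate, so xFinalize!=0) do not.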
*/ -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ - return exprIsConst(p, 1, 0); +SQLITE_PRIVATE int sqlite3ExprIsConstant(Parse *pParse, Expr *p){ + return exprIsConst(pParse, p, 1); } /* @@ -110822,8 +111569,24 @@ SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ ** can be added to the pParse->pConstExpr list and evaluated once when ** the prepared statement starts up. See sqlite3ExprCodeRunJustOnce(). */ -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ - return exprIsConst(p, 2, 0); +static int sqlite3ExprIsConstantNotJoin(Parse *pParse, Expr *p){ + return exprIsConst(pParse, p, 2); +} + +/* +** This routine examines sub-SELECT statements as an expression is being +** walked as part of sqlite3ExprIsTableConstant(). Sub-SELECTs are considered +** constant as long as they are uncorrelated - meaning that they do not +** contain any terms from outer contexts. +*/ +static int exprSelectWalkTableConstant(Walker *pWalker, Select *pSelect){ + assert( pSelect!=0 ); + assert( pWalker->eCode==3 || pWalker->eCode==0 ); + if( (pSelect->selFlags & SF_Correlated)!=0 ){ + pWalker->eCode = 0; + return WRC_Abort; + } + return WRC_Prune; } /* @@ -110831,9 +111594,26 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ ** for any single row of the table with cursor iCur. In other words, the ** expression must not refer to any non-deterministic function nor any ** table other than iCur. +** +** Consider uncorrelated subqueries to be constants if the bAllowSubq +** parameter is true. */ -SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ - return exprIsConst(p, 3, iCur); +static int sqlite3ExprIsTableConstant(Expr *p, int iCur, int bAllowSubq){ + Walker w; + w.eCode = 3; + w.pParse = 0; + w.xExprCallback = exprNodeIsConstant; + if( bAllowSubq ){ + w.xSelectCallback = exprSelectWalkTableConstant; + }else{ + w.xSelectCallback = sqlite3SelectWalkFail; +#ifdef SQLITE_DEBUG + w.xSelectCallback2 = sqlite3SelectWalkAssert2; +#endif + } + w.u.iCur = iCur; + sqlite3WalkExpr(&w, p); + return w.eCode; } /* @@ -110851,7 +111631,10 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ ** ** (1) pExpr cannot refer to any table other than pSrc->iCursor. ** -** (2) pExpr cannot use subqueries or non-deterministic functions. +** (2a) pExpr cannot use subqueries unless the bAllowSubq parameter is +** true and the subquery is non-correlated +** +** (2b) pExpr cannot use non-deterministic functions. ** ** (3) pSrc cannot be part of the left operand for a RIGHT JOIN. ** (Is there some way to relax this constraint?) 
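**
** An example of the distinction rule (2a) draws (illustrative SQL):
**
**     SELECT * FROM t1 WHERE a = (SELECT max(b) FROM t2);
**         -- uncorrelated: may be treated as a table-constant
**     SELECT * FROM t1 WHERE a = (SELECT max(b) FROM t2 WHERE t2.c=t1.c);
**         -- correlated: SF_Correlated is set during name resolution
**         -- (see the resolveExprStep() hunk earlier), so it is excluded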
@@ -110880,7 +111663,8 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( Expr *pExpr, /* The constraint */ const SrcList *pSrcList, /* Complete FROM clause */ - int iSrc /* Which element of pSrcList to use */ + int iSrc, /* Which element of pSrcList to use */ + int bAllowSubq /* Allow non-correlated subqueries */ ){ const SrcItem *pSrc = &pSrcList->a[iSrc]; if( pSrc->fg.jointype & JT_LTORJ ){ @@ -110905,7 +111689,8 @@ SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( } } } - return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */ + /* Rules (1), (2a), and (2b) handled by the following: */ + return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor, bAllowSubq); } @@ -110990,7 +111775,7 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse *pParse, Expr *p, ExprLi */ SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p, u8 isInit){ assert( isInit==0 || isInit==1 ); - return exprIsConst(p, 4+isInit, 0); + return exprIsConst(0, p, 4+isInit); } #ifdef SQLITE_ENABLE_CURSOR_HINTS @@ -111080,9 +111865,12 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){ return 0; case TK_COLUMN: assert( ExprUseYTab(p) ); - return ExprHasProperty(p, EP_CanBeNull) || - NEVER(p->y.pTab==0) || /* Reference to column of index on expr */ - (p->iColumn>=0 + return ExprHasProperty(p, EP_CanBeNull) + || NEVER(p->y.pTab==0) /* Reference to column of index on expr */ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + || (p->iColumn==XN_ROWID && IsView(p->y.pTab)) +#endif + || (p->iColumn>=0 && p->y.pTab->aCol!=0 /* Possible due to prior error */ && ALWAYS(p->iColumny.pTab->nCol) && p->y.pTab->aCol[p->iColumn].notNull==0); @@ -111235,13 +112023,13 @@ static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){ ** The argument is an IN operator with a list (not a subquery) on the ** right-hand side. Return TRUE if that list is constant. */ -static int sqlite3InRhsIsConstant(Expr *pIn){ +static int sqlite3InRhsIsConstant(Parse *pParse, Expr *pIn){ Expr *pLHS; int res; assert( !ExprHasProperty(pIn, EP_xIsSelect) ); pLHS = pIn->pLeft; pIn->pLeft = 0; - res = sqlite3ExprIsConstant(pIn); + res = sqlite3ExprIsConstant(pParse, pIn); pIn->pLeft = pLHS; return res; } @@ -111510,7 +112298,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( if( eType==0 && (inFlags & IN_INDEX_NOOP_OK) && ExprUseXList(pX) - && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2) + && (!sqlite3InRhsIsConstant(pParse,pX) || pX->x.pList->nExpr<=2) ){ pParse->nTab--; /* Back out the allocation of the unused cursor */ iTab = -1; /* Cursor is not allocated */ @@ -111793,7 +112581,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( ** this code only executes once. Because for a non-constant ** expression we need to rerun this code each time. */ - if( addrOnce && !sqlite3ExprIsConstant(pE2) ){ + if( addrOnce && !sqlite3ExprIsConstant(pParse, pE2) ){ sqlite3VdbeChangeToNoop(v, addrOnce-1); sqlite3VdbeChangeToNoop(v, addrOnce); ExprClearProperty(pExpr, EP_Subrtn); @@ -112957,12 +113745,6 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) assert( pExpr->u.zToken!=0 ); assert( pExpr->u.zToken[0]!=0 ); sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target); - if( pExpr->u.zToken[1]!=0 ){ - const char *z = sqlite3VListNumToName(pParse->pVList, pExpr->iColumn); - assert( pExpr->u.zToken[0]=='?' 
|| (z && !strcmp(pExpr->u.zToken, z)) ); - pParse->pVList[0] = 0; /* Indicate VList may no longer be enlarged */ - sqlite3VdbeAppendP4(v, (char*)z, P4_STATIC); - } return target; } case TK_REGISTER: { @@ -113136,7 +113918,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } #endif - if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){ + if( ConstFactorOk(pParse) + && sqlite3ExprIsConstantNotJoin(pParse,pExpr) + ){ /* SQL functions can be expensive. So try to avoid running them ** multiple times if we know they always give the same result */ return sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1); @@ -113167,7 +113951,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } for(i=0; ia[i].pExpr) ){ + if( i<32 && sqlite3ExprIsConstant(pParse, pFarg->a[i].pExpr) ){ testcase( i==31 ); constMask |= MASKBIT32(i); } @@ -113309,8 +114093,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) if( !ExprHasProperty(pExpr, EP_Collate) ){ /* A TK_COLLATE Expr node without the EP_Collate tag is a so-called ** "SOFT-COLLATE" that is added to constraints that are pushed down - ** from outer queries into sub-queries by the push-down optimization. - ** Clear subtypes as subtypes may not cross a subquery boundary. + ** from outer queries into sub-queries by the WHERE-clause push-down + ** optimization. Clear subtypes as subtypes may not cross a subquery + ** boundary. */ assert( pExpr->pLeft ); sqlite3ExprCode(pParse, pExpr->pLeft, target); @@ -113634,7 +114419,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ if( ConstFactorOk(pParse) && ALWAYS(pExpr!=0) && pExpr->op!=TK_REGISTER - && sqlite3ExprIsConstantNotJoin(pExpr) + && sqlite3ExprIsConstantNotJoin(pParse, pExpr) ){ *pReg = 0; r2 = sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1); @@ -113698,7 +114483,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){ ** might choose to code the expression at initialization time. 
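**
** For instance (illustrative), in
**
**     SELECT * FROM t WHERE key = hex(zeroblob(8));
**
** both functions carry SQLITE_FUNC_CONSTANT, so with the Parse pointer
** now threaded through, the right-hand side can be coded once via
** sqlite3ExprCodeRunJustOnce() in the statement prologue instead of
** once per row; swap in random() and the factoring is declined.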
*/ SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){ - if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pExpr) ){ + if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pParse,pExpr) ){ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target); }else{ sqlite3ExprCodeCopy(pParse, pExpr, target); @@ -113757,7 +114542,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList( sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i); } }else if( (flags & SQLITE_ECEL_FACTOR)!=0 - && sqlite3ExprIsConstantNotJoin(pExpr) + && sqlite3ExprIsConstantNotJoin(pParse,pExpr) ){ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target+i); }else{ @@ -114908,9 +115693,8 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ && pAggInfo->aCol[iAgg].pCExpr==pExpr ){ pExpr = sqlite3ExprDup(db, pExpr, 0); - if( pExpr ){ + if( pExpr && !sqlite3ExprDeferredDelete(pParse, pExpr) ){ pAggInfo->aCol[iAgg].pCExpr = pExpr; - sqlite3ExprDeferredDelete(pParse, pExpr); } } }else{ @@ -114919,9 +115703,8 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ && pAggInfo->aFunc[iAgg].pFExpr==pExpr ){ pExpr = sqlite3ExprDup(db, pExpr, 0); - if( pExpr ){ + if( pExpr && !sqlite3ExprDeferredDelete(pParse, pExpr) ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr; - sqlite3ExprDeferredDelete(pParse, pExpr); } } } @@ -116680,7 +117463,7 @@ static int renameResolveTrigger(Parse *pParse){ /* ALWAYS() because if the table of the trigger does not exist, the ** error would have been hit before this point */ if( ALWAYS(pParse->pTriggerTab) ){ - rc = sqlite3ViewGetColumnNames(pParse, pParse->pTriggerTab); + rc = sqlite3ViewGetColumnNames(pParse, pParse->pTriggerTab)!=0; } /* Resolve symbols in WHEN clause */ @@ -117622,7 +118405,12 @@ SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, const T if( i==pTab->iPKey ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regOut); }else{ + char aff = pTab->aCol[i].affinity; + if( aff==SQLITE_AFF_REAL ){ + pTab->aCol[i].affinity = SQLITE_AFF_NUMERIC; + } sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, i, regOut); + pTab->aCol[i].affinity = aff; } nField++; } @@ -118541,7 +119329,7 @@ static void statGet( if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1; sqlite3_str_appendf(&sStat, " %llu", iVal); #ifdef SQLITE_ENABLE_STAT4 - assert( p->current.anEq[i] ); + assert( p->current.anEq[i] || p->nRow==0 ); #endif } sqlite3ResultStrAccum(context, &sStat); @@ -118726,7 +119514,7 @@ static void analyzeOneTable( for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int nCol; /* Number of columns in pIdx. "N" */ - int addrRewind; /* Address of "OP_Rewind iIdxCur" */ + int addrGotoEnd; /* Address of "OP_Rewind iIdxCur" */ int addrNextRow; /* Address of "next_row:" */ const char *zIdxName; /* Name of the index */ int nColTest; /* Number of columns to test for changes */ @@ -118750,9 +119538,14 @@ static void analyzeOneTable( /* ** Pseudo-code for loop that calls stat_push(): ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; ** regChng = 0 + ** Rewind csr + ** if eof(csr){ + ** stat_init() with count = 0; + ** goto end_of_scan; + ** } + ** count() + ** stat_init() ** goto chng_addr_0; ** ** next_row: @@ -118791,41 +119584,36 @@ static void analyzeOneTable( sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); - /* Invoke the stat_init() function. 
The arguments are: + /* Implementation of the following: ** + ** regChng = 0 + ** Rewind csr + ** if eof(csr){ + ** stat_init() with count = 0; + ** goto end_of_scan; + ** } + ** count() + ** stat_init() + ** goto chng_addr_0; + */ + assert( regTemp2==regStat+4 ); + sqlite3VdbeAddOp2(v, OP_Integer, db->nAnalysisLimit, regTemp2); + + /* Arguments to stat_init(): ** (1) the number of columns in the index including the rowid ** (or for a WITHOUT ROWID table, the number of PK columns), ** (2) the number of columns in the key without the rowid/pk - ** (3) estimated number of rows in the index, - */ + ** (3) estimated number of rows in the index. */ sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat+1); assert( regRowid==regStat+2 ); sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regRowid); -#ifdef SQLITE_ENABLE_STAT4 - if( OptimizationEnabled(db, SQLITE_Stat4) ){ - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regTemp); - addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); - VdbeCoverage(v); - }else -#endif - { - addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); - VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_Count, iIdxCur, regTemp, 1); - } - assert( regTemp2==regStat+4 ); - sqlite3VdbeAddOp2(v, OP_Integer, db->nAnalysisLimit, regTemp2); + sqlite3VdbeAddOp3(v, OP_Count, iIdxCur, regTemp, + OptimizationDisabled(db, SQLITE_Stat4)); sqlite3VdbeAddFunctionCall(pParse, 0, regStat+1, regStat, 4, &statInitFuncdef, 0); + addrGotoEnd = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); + VdbeCoverage(v); - /* Implementation of the following: - ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; - ** regChng = 0 - ** goto next_push_0; - ** - */ sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); addrNextRow = sqlite3VdbeCurrentAddr(v); @@ -118932,6 +119720,12 @@ static void analyzeOneTable( } /* Add the entry to the stat1 table. */ + if( pIdx->pPartIdxWhere ){ + /* Partial indexes might get a zero-entry in sqlite_stat1. But + ** an empty table is omitted from sqlite_stat1. */ + sqlite3VdbeJumpHere(v, addrGotoEnd); + addrGotoEnd = 0; + } callStatGet(pParse, regStat, STAT_GET_STAT1, regStat1); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); @@ -118955,6 +119749,13 @@ static void analyzeOneTable( int addrIsNull; u8 seekOp = HasRowid(pTab) ? 
OP_NotExists : OP_NotFound; + /* No STAT4 data is generated if the number of rows is zero */ + if( addrGotoEnd==0 ){ + sqlite3VdbeAddOp2(v, OP_Cast, regStat1, SQLITE_AFF_INTEGER); + addrGotoEnd = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); + VdbeCoverage(v); + } + if( doOnce ){ int mxCol = nCol; Index *pX; @@ -119007,7 +119808,7 @@ static void analyzeOneTable( #endif /* SQLITE_ENABLE_STAT4 */ /* End of analysis */ - sqlite3VdbeJumpHere(v, addrRewind); + if( addrGotoEnd ) sqlite3VdbeJumpHere(v, addrGotoEnd); } @@ -120756,7 +121557,7 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } sqlite3VdbeAddOp0(v, OP_Halt); -#if SQLITE_USER_AUTHENTICATION +#if SQLITE_USER_AUTHENTICATION && !defined(SQLITE_OMIT_SHARED_CACHE) if( pParse->nTableLock>0 && db->init.busy==0 ){ sqlite3UserAuthInit(db); if( db->auth.authLevelrc = SQLITE_ERROR; pParse->nErr++; return; } + iCsr = pParse->nTab++; regYield = ++pParse->nMem; regRec = ++pParse->nMem; regRowid = ++pParse->nMem; - assert(pParse->nTab==1); sqlite3MayAbort(pParse); - sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb); + sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->regRoot, iDb); sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG); - pParse->nTab = 2; addrTop = sqlite3VdbeCurrentAddr(v) + 1; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); if( pParse->nErr ) return; @@ -123429,11 +124230,11 @@ SQLITE_PRIVATE void sqlite3EndTable( VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_MakeRecord, dest.iSdst, dest.nSdst, regRec); sqlite3TableAffinity(v, p, 0); - sqlite3VdbeAddOp2(v, OP_NewRowid, 1, regRowid); - sqlite3VdbeAddOp3(v, OP_Insert, 1, regRec, regRowid); + sqlite3VdbeAddOp2(v, OP_NewRowid, iCsr, regRowid); + sqlite3VdbeAddOp3(v, OP_Insert, iCsr, regRec, regRowid); sqlite3VdbeGoto(v, addrInsLoop); sqlite3VdbeJumpHere(v, addrInsLoop); - sqlite3VdbeAddOp1(v, OP_Close, 1); + sqlite3VdbeAddOp1(v, OP_Close, iCsr); } /* Compute the complete text of the CREATE statement */ @@ -123490,13 +124291,10 @@ SQLITE_PRIVATE void sqlite3EndTable( /* Test for cycles in generated columns and illegal expressions ** in CHECK constraints and in DEFAULT clauses. */ if( p->tabFlags & TF_HasGenerated ){ - sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, + sqlite3VdbeAddOp4(v, OP_SqlExec, 0x0001, 0, 0, sqlite3MPrintf(db, "SELECT*FROM\"%w\".\"%w\"", db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } - sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, - sqlite3MPrintf(db, "PRAGMA \"%w\".integrity_check(%Q)", - db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } /* Add the table to the in-memory representation of the database. @@ -123573,9 +124371,12 @@ SQLITE_PRIVATE void sqlite3CreateView( ** on a view, even though views do not have rowids. The following flag ** setting fixes this problem. But the fix can be disabled by compiling ** with -DSQLITE_ALLOW_ROWID_IN_VIEW in case there are legacy apps that - ** depend upon the old buggy behavior. */ -#ifndef SQLITE_ALLOW_ROWID_IN_VIEW - p->tabFlags |= TF_NoVisibleRowid; + ** depend upon the old buggy behavior. The ability can also be toggled + ** using sqlite3_config(SQLITE_CONFIG_ROWID_IN_VIEW,...) */ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + p->tabFlags |= sqlite3Config.mNoVisibleRowid; /* Optional. Allow by default */ +#else + p->tabFlags |= TF_NoVisibleRowid; /* Never allow rowid in view */ #endif sqlite3TwoPartName(pParse, pName1, pName2, &pName); @@ -123631,8 +124432,9 @@ SQLITE_PRIVATE void sqlite3CreateView( #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) /* ** The Table structure pTable is really a VIEW. 
Fill in the names of -** the columns of the view in the pTable structure. Return the number -** of errors. If an error is seen leave an error message in pParse->zErrMsg. +** the columns of the view in the pTable structure. Return non-zero if +** there are errors. If an error is seen an error message is left +** in pParse->zErrMsg. */ static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){ Table *pSelTab; /* A fake table from which we get the result set */ @@ -123755,7 +124557,7 @@ static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){ sqlite3DeleteColumnNames(db, pTable); } #endif /* SQLITE_OMIT_VIEW */ - return nErr; + return nErr + pParse->nErr; } SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ assert( pTable!=0 ); @@ -128948,13 +129750,13 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ double r1, r2; const char *zVal; r1 = sqlite3_value_double(pValue); - sqlite3_str_appendf(pStr, "%!.15g", r1); + sqlite3_str_appendf(pStr, "%!0.15g", r1); zVal = sqlite3_str_value(pStr); if( zVal ){ sqlite3AtoF(zVal, &r2, pStr->nChar, SQLITE_UTF8); if( r1!=r2 ){ sqlite3_str_reset(pStr); - sqlite3_str_appendf(pStr, "%!.20e", r1); + sqlite3_str_appendf(pStr, "%!0.20e", r1); } } break; @@ -129256,7 +130058,7 @@ static void replaceFunc( } if( zPattern[0]==0 ){ assert( sqlite3_value_type(argv[1])!=SQLITE_NULL ); - sqlite3_result_value(context, argv[0]); + sqlite3_result_text(context, (const char*)zStr, nStr, SQLITE_TRANSIENT); return; } nPattern = sqlite3_value_bytes(argv[1]); @@ -129739,7 +130541,7 @@ static void sumFinalize(sqlite3_context *context){ if( p->approx ){ if( p->ovrfl ){ sqlite3_result_error(context,"integer overflow",-1); - }else if( !sqlite3IsNaN(p->rErr) ){ + }else if( !sqlite3IsOverflow(p->rErr) ){ sqlite3_result_double(context, p->rSum+p->rErr); }else{ sqlite3_result_double(context, p->rSum); @@ -129756,7 +130558,7 @@ static void avgFinalize(sqlite3_context *context){ double r; if( p->approx ){ r = p->rSum; - if( !sqlite3IsNaN(p->rErr) ) r += p->rErr; + if( !sqlite3IsOverflow(p->rErr) ) r += p->rErr; }else{ r = (double)(p->iSum); } @@ -129770,7 +130572,7 @@ static void totalFinalize(sqlite3_context *context){ if( p ){ if( p->approx ){ r = p->rSum; - if( !sqlite3IsNaN(p->rErr) ) r += p->rErr; + if( !sqlite3IsOverflow(p->rErr) ) r += p->rErr; }else{ r = (double)(p->iSum); } @@ -130053,6 +130855,8 @@ static void groupConcatValue(sqlite3_context *context){ sqlite3_result_error_toobig(context); }else if( pAccum->accError==SQLITE_NOMEM ){ sqlite3_result_error_nomem(context); + }else if( pGCC->nAccum>0 && pAccum->nChar==0 ){ + sqlite3_result_text(context, "", 1, SQLITE_STATIC); }else{ const char *zText = sqlite3_str_value(pAccum); sqlite3_result_text(context, zText, pAccum->nChar, SQLITE_TRANSIENT); @@ -132667,6 +133471,196 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ # define autoIncStep(A,B,C) #endif /* SQLITE_OMIT_AUTOINCREMENT */ +/* +** If argument pVal is a Select object returned by an sqlite3MultiValues() +** that was able to use the co-routine optimization, finish coding the +** co-routine. 
+*/ +SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal){ + if( ALWAYS(pVal) && pVal->pSrc->nSrc>0 ){ + SrcItem *pItem = &pVal->pSrc->a[0]; + sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->regReturn); + sqlite3VdbeJumpHere(pParse->pVdbe, pItem->addrFillSub - 1); + } +} + +/* +** Return true if all expressions in the expression-list passed as the +** only argument are constant. +*/ +static int exprListIsConstant(Parse *pParse, ExprList *pRow){ + int ii; + for(ii=0; iinExpr; ii++){ + if( 0==sqlite3ExprIsConstant(pParse, pRow->a[ii].pExpr) ) return 0; + } + return 1; +} + +/* +** Return true if all expressions in the expression-list passed as the +** only argument are both constant and have no affinity. +*/ +static int exprListIsNoAffinity(Parse *pParse, ExprList *pRow){ + int ii; + if( exprListIsConstant(pParse,pRow)==0 ) return 0; + for(ii=0; iinExpr; ii++){ + Expr *pExpr = pRow->a[ii].pExpr; + assert( pExpr->op!=TK_RAISE ); + assert( pExpr->affExpr==0 ); + if( 0!=sqlite3ExprAffinity(pExpr) ) return 0; + } + return 1; + +} + +/* +** This function is called by the parser for the second and subsequent +** rows of a multi-row VALUES clause. Argument pLeft is the part of +** the VALUES clause already parsed, argument pRow is the vector of values +** for the new row. The Select object returned represents the complete +** VALUES clause, including the new row. +** +** There are two ways in which this may be achieved - by incremental +** coding of a co-routine (the "co-routine" method) or by returning a +** Select object equivalent to the following (the "UNION ALL" method): +** +** "pLeft UNION ALL SELECT pRow" +** +** If the VALUES clause contains a lot of rows, this compound Select +** object may consume a lot of memory. +** +** When the co-routine method is used, each row that will be returned +** by the VALUES clause is coded into part of a co-routine as it is +** passed to this function. The returned Select object is equivalent to: +** +** SELECT * FROM ( +** Select object to read co-routine +** ) +** +** The co-routine method is used in most cases. Exceptions are: +** +** a) If the current statement has a WITH clause. This is to avoid +** statements like: +** +** WITH cte AS ( VALUES('x'), ('y') ... ) +** SELECT * FROM cte AS a, cte AS b; +** +** This will not work, as the co-routine uses a hard-coded register +** for its OP_Yield instructions, and so it is not possible for two +** cursors to iterate through it concurrently. +** +** b) The schema is currently being parsed (i.e. the VALUES clause is part +** of a schema item like a VIEW or TRIGGER). In this case there is no VM +** being generated when parsing is taking place, and so generating +** a co-routine is not possible. +** +** c) There are non-constant expressions in the VALUES clause (e.g. +** the VALUES clause is part of a correlated sub-query). +** +** d) One or more of the values in the first row of the VALUES clause +** has an affinity (i.e. is a CAST expression). This causes problems +** because the complex rules SQLite uses (see function +** sqlite3SubqueryColumnTypes() in select.c) to determine the effective +** affinity of such a column for all rows require access to all values in +** the column simultaneously. 
+*/ +SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList *pRow){ + + if( pParse->bHasWith /* condition (a) above */ + || pParse->db->init.busy /* condition (b) above */ + || exprListIsConstant(pParse,pRow)==0 /* condition (c) above */ + || (pLeft->pSrc->nSrc==0 && + exprListIsNoAffinity(pParse,pLeft->pEList)==0) /* condition (d) above */ + || IN_SPECIAL_PARSE + ){ + /* The co-routine method cannot be used. Fall back to UNION ALL. */ + Select *pSelect = 0; + int f = SF_Values | SF_MultiValue; + if( pLeft->pSrc->nSrc ){ + sqlite3MultiValuesEnd(pParse, pLeft); + f = SF_Values; + }else if( pLeft->pPrior ){ + /* In this case set the SF_MultiValue flag only if it was set on pLeft */ + f = (f & pLeft->selFlags); + } + pSelect = sqlite3SelectNew(pParse, pRow, 0, 0, 0, 0, 0, f, 0); + pLeft->selFlags &= ~SF_MultiValue; + if( pSelect ){ + pSelect->op = TK_ALL; + pSelect->pPrior = pLeft; + pLeft = pSelect; + } + }else{ + SrcItem *p = 0; /* SrcItem that reads from co-routine */ + + if( pLeft->pSrc->nSrc==0 ){ + /* Co-routine has not yet been started and the special Select object + ** that accesses the co-routine has not yet been created. This block + ** does both those things. */ + Vdbe *v = sqlite3GetVdbe(pParse); + Select *pRet = sqlite3SelectNew(pParse, 0, 0, 0, 0, 0, 0, 0, 0); + + /* Ensure the database schema has been read. This is to ensure we have + ** the correct text encoding. */ + if( (pParse->db->mDbFlags & DBFLAG_SchemaKnownOk)==0 ){ + sqlite3ReadSchema(pParse); + } + + if( pRet ){ + SelectDest dest; + pRet->pSrc->nSrc = 1; + pRet->pPrior = pLeft->pPrior; + pRet->op = pLeft->op; + if( pRet->pPrior ) pRet->selFlags |= SF_Values; + pLeft->pPrior = 0; + pLeft->op = TK_SELECT; + assert( pLeft->pNext==0 ); + assert( pRet->pNext==0 ); + p = &pRet->pSrc->a[0]; + p->pSelect = pLeft; + p->fg.viaCoroutine = 1; + p->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; + p->regReturn = ++pParse->nMem; + p->iCursor = -1; + p->u1.nRow = 2; + sqlite3VdbeAddOp3(v,OP_InitCoroutine,p->regReturn,0,p->addrFillSub); + sqlite3SelectDestInit(&dest, SRT_Coroutine, p->regReturn); + + /* Allocate registers for the output of the co-routine. Do so so + ** that there are two unused registers immediately before those + ** used by the co-routine. This allows the code in sqlite3Insert() + ** to use these registers directly, instead of copying the output + ** of the co-routine to a separate array for processing. */ + dest.iSdst = pParse->nMem + 3; + dest.nSdst = pLeft->pEList->nExpr; + pParse->nMem += 2 + dest.nSdst; + + pLeft->selFlags |= SF_MultiValue; + sqlite3Select(pParse, pLeft, &dest); + p->regResult = dest.iSdst; + assert( pParse->nErr || dest.iSdst>0 ); + pLeft = pRet; + } + }else{ + p = &pLeft->pSrc->a[0]; + assert( !p->fg.isTabFunc && !p->fg.isIndexedBy ); + p->u1.nRow++; + } + + if( pParse->nErr==0 ){ + assert( p!=0 ); + if( p->pSelect->pEList->nExpr!=pRow->nExpr ){ + sqlite3SelectWrongNumTermsError(pParse, p->pSelect); + }else{ + sqlite3ExprCodeExprList(pParse, pRow, p->regResult, 0, 0); + sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, p->regReturn); + } + } + sqlite3ExprListDelete(pParse->db, pRow); + } + + return pLeft; +} /* Forward declaration */ static int xferOptimization( @@ -133003,25 +133997,40 @@ SQLITE_PRIVATE void sqlite3Insert( if( pSelect ){ /* Data is coming from a SELECT or from a multi-row VALUES clause. ** Generate a co-routine to run the SELECT. 
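Aside: the co-routine path of sqlite3MultiValues() above is easy to exercise with a long, constant VALUES list — no CTE, no non-constant terms, and no CAST in the first row, so conditions (a)-(d) do not apply. A sketch using only the public sqlite3_str interface; with the co-routine method each row is coded as it is parsed, so memory stays roughly flat instead of growing by one Select object per row:

#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_str *pStr;
  char *zSql;
  int i;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(x,y);", 0, 0, 0);
  pStr = sqlite3_str_new(db);
  sqlite3_str_appendall(pStr, "INSERT INTO t1 VALUES");
  for(i=0; i<10000; i++){
    sqlite3_str_appendf(pStr, "%s(%d,%d)", i ? "," : "", i, i*2);
  }
  zSql = sqlite3_str_finish(pStr);    /* caller must sqlite3_free() this */
  sqlite3_exec(db, zSql, 0, 0, 0);    /* one statement, 10000 rows */
  sqlite3_free(zSql);
  sqlite3_close(db);
  return 0;
}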
*/ - int regYield; /* Register holding co-routine entry-point */ - int addrTop; /* Top of the co-routine */ int rc; /* Result code */ - regYield = ++pParse->nMem; - addrTop = sqlite3VdbeCurrentAddr(v) + 1; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); - sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); - dest.iSdst = bIdListInOrder ? regData : 0; - dest.nSdst = pTab->nCol; - rc = sqlite3Select(pParse, pSelect, &dest); - regFromSelect = dest.iSdst; - assert( db->pParse==pParse ); - if( rc || pParse->nErr ) goto insert_cleanup; - assert( db->mallocFailed==0 ); - sqlite3VdbeEndCoroutine(v, regYield); - sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ - assert( pSelect->pEList ); - nColumn = pSelect->pEList->nExpr; + if( pSelect->pSrc->nSrc==1 + && pSelect->pSrc->a[0].fg.viaCoroutine + && pSelect->pPrior==0 + ){ + SrcItem *pItem = &pSelect->pSrc->a[0]; + dest.iSDParm = pItem->regReturn; + regFromSelect = pItem->regResult; + nColumn = pItem->pSelect->pEList->nExpr; + ExplainQueryPlan((pParse, 0, "SCAN %S", pItem)); + if( bIdListInOrder && nColumn==pTab->nCol ){ + regData = regFromSelect; + regRowid = regData - 1; + regIns = regRowid - (IsVirtual(pTab) ? 1 : 0); + } + }else{ + int addrTop; /* Top of the co-routine */ + int regYield = ++pParse->nMem; + addrTop = sqlite3VdbeCurrentAddr(v) + 1; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); + sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); + dest.iSdst = bIdListInOrder ? regData : 0; + dest.nSdst = pTab->nCol; + rc = sqlite3Select(pParse, pSelect, &dest); + regFromSelect = dest.iSdst; + assert( db->pParse==pParse ); + if( rc || pParse->nErr ) goto insert_cleanup; + assert( db->mallocFailed==0 ); + sqlite3VdbeEndCoroutine(v, regYield); + sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ + assert( pSelect->pEList ); + nColumn = pSelect->pEList->nExpr; + } /* Set useTempTable to TRUE if the result of the SELECT statement ** should be written into a temporary table (template 4). Set to @@ -133176,7 +134185,7 @@ SQLITE_PRIVATE void sqlite3Insert( pNx->iDataCur = iDataCur; pNx->iIdxCur = iIdxCur; if( pNx->pUpsertTarget ){ - if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx) ){ + if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx, pUpsert) ){ goto insert_cleanup; } } @@ -135068,7 +136077,10 @@ static int xferOptimization( } } #ifndef SQLITE_OMIT_CHECK - if( pDest->pCheck && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) ){ + if( pDest->pCheck + && (db->mDbFlags & DBFLAG_Vacuum)==0 + && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) + ){ return 0; /* Tables have different CHECK constraints. Ticket #2252 */ } #endif @@ -137743,6 +138755,34 @@ static const PragmaName aPragmaName[] = { /************** End of pragma.h **********************************************/ /************** Continuing where we left off in pragma.c *********************/ +/* +** When the 0x10 bit of PRAGMA optimize is set, any ANALYZE commands +** will be run with an analysis_limit set to the lesser of the value of +** the following macro or to the actual analysis_limit if it is non-zero, +** in order to prevent PRAGMA optimize from running for too long. +** +** The value of 2000 is chosen empirically so that the worst-case run-time +** for PRAGMA optimize does not exceed 100 milliseconds against a variety +** of test databases on a RaspberryPI-4 compiled using -Os and without +** -DSQLITE_DEBUG. Of course, your mileage may vary.
For the purpose of +** this paragraph, "worst-case" means that ANALYZE ends up being +** run on every table in the database. The worst case typically only +** happens if PRAGMA optimize is run on a database file for which ANALYZE +** has not been previously run and the 0x10000 flag is included so that +** all tables are analyzed. The usual case for PRAGMA optimize is that +** no ANALYZE commands will be run at all, or if any ANALYZE happens it +** will be against a single table, so that expected timing for PRAGMA +** optimize on a PI-4 is more like 1 millisecond or less with the 0x10000 +** flag or less than 100 microseconds without the 0x10000 flag. +** +** An analysis limit of 2000 is almost always sufficient for the query +** planner to fully characterize an index. The additional accuracy from +** a larger analysis is not usually helpful. +*/ +#ifndef SQLITE_DEFAULT_OPTIMIZE_LIMIT +# define SQLITE_DEFAULT_OPTIMIZE_LIMIT 2000 +#endif + /* ** Interpret the given string as a safety level. Return 0 for OFF, ** 1 for ON or NORMAL, 2 for FULL, and 3 for EXTRA. Return 1 for an empty or @@ -139388,7 +140428,7 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Set the maximum error count */ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; if( zRight ){ - if( sqlite3GetInt32(zRight, &mxErr) ){ + if( sqlite3GetInt32(pValue->z, &mxErr) ){ if( mxErr<=0 ){ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; } @@ -139405,7 +140445,6 @@ SQLITE_PRIVATE void sqlite3Pragma( Hash *pTbls; /* Set of all tables in the schema */ int *aRoot; /* Array of root page numbers of all btrees */ int cnt = 0; /* Number of entries in aRoot[] */ - int mxIdx = 0; /* Maximum number of indexes for any table */ if( OMIT_TEMPDB && i==1 ) continue; if( iDb>=0 && i!=iDb ) continue; @@ -139427,7 +140466,6 @@ SQLITE_PRIVATE void sqlite3Pragma( if( pObjTab && pObjTab!=pTab ) continue; if( HasRowid(pTab) ) cnt++; for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ cnt++; } - if( nIdx>mxIdx ) mxIdx = nIdx; } if( cnt==0 ) continue; if( pObjTab ) cnt++; @@ -139447,11 +140485,11 @@ SQLITE_PRIVATE void sqlite3Pragma( aRoot[0] = cnt; /* Make sure sufficient number of registers have been allocated */ - sqlite3TouchRegister(pParse, 8+mxIdx); + sqlite3TouchRegister(pParse, 8+cnt); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ - sqlite3VdbeAddOp4(v, OP_IntegrityCk, 2, cnt, 1, (char*)aRoot,P4_INTARRAY); + sqlite3VdbeAddOp4(v, OP_IntegrityCk, 1, cnt, 8, (char*)aRoot,P4_INTARRAY); sqlite3VdbeChangeP5(v, (u8)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, @@ -139461,6 +140499,36 @@ SQLITE_PRIVATE void sqlite3Pragma( integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, addr); + /* Check that the indexes all have the right number of rows */ + cnt = pObjTab ? 
1 : 0; + sqlite3VdbeLoadString(v, 2, "wrong # of entries in index "); + for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + int iTab = 0; + Table *pTab = sqliteHashData(x); + Index *pIdx; + if( pObjTab && pObjTab!=pTab ) continue; + if( HasRowid(pTab) ){ + iTab = cnt++; + }else{ + iTab = cnt; + for(pIdx=pTab->pIndex; ALWAYS(pIdx); pIdx=pIdx->pNext){ + if( IsPrimaryKeyIndex(pIdx) ) break; + iTab++; + } + } + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->pPartIdxWhere==0 ){ + addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+cnt, 0, 8+iTab); + VdbeCoverageNeverNull(v); + sqlite3VdbeLoadString(v, 4, pIdx->zName); + sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, addr); + } + cnt++; + } + } + /* Make sure all the indices are constructed correctly. */ for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ @@ -139475,31 +140543,7 @@ SQLITE_PRIVATE void sqlite3Pragma( int mxCol; /* Maximum non-virtual column number */ if( pObjTab && pObjTab!=pTab ) continue; - if( !IsOrdinaryTable(pTab) ){ -#ifndef SQLITE_OMIT_VIRTUALTABLE - sqlite3_vtab *pVTab; - int a1; - if( !IsVirtual(pTab) ) continue; - if( pTab->nCol<=0 ){ - const char *zMod = pTab->u.vtab.azArg[0]; - if( sqlite3HashFind(&db->aModule, zMod)==0 ) continue; - } - sqlite3ViewGetColumnNames(pParse, pTab); - if( pTab->u.vtab.p==0 ) continue; - pVTab = pTab->u.vtab.p->pVtab; - if( NEVER(pVTab==0) ) continue; - if( NEVER(pVTab->pModule==0) ) continue; - if( pVTab->pModule->iVersion<4 ) continue; - if( pVTab->pModule->xIntegrity==0 ) continue; - sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick); - pTab->nTabRef++; - sqlite3VdbeAppendP4(v, pTab, P4_TABLEREF); - a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v); - integrityCheckResultRow(v); - sqlite3VdbeJumpHere(v, a1); -#endif - continue; - } + if( !IsOrdinaryTable(pTab) ) continue; if( isQuick || HasRowid(pTab) ){ pPk = 0; r2 = 0; @@ -139634,6 +140678,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** is REAL, we have to load the actual data using OP_Column ** to reliably determine if the value is a NULL. */ sqlite3VdbeAddOp3(v, OP_Column, p1, p3, 3); + sqlite3ColumnDefault(v, pTab, j, 3); jmp3 = sqlite3VdbeAddOp2(v, OP_NotNull, 3, labelOk); VdbeCoverage(v); } @@ -139807,23 +140852,43 @@ SQLITE_PRIVATE void sqlite3Pragma( } sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v); sqlite3VdbeJumpHere(v, loopTop-1); - if( !isQuick ){ - sqlite3VdbeLoadString(v, 2, "wrong # of entries in index "); - for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - if( pPk==pIdx ) continue; - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3); - addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+j, 0, 3); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - sqlite3VdbeLoadString(v, 4, pIdx->zName); - sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3); - integrityCheckResultRow(v); - sqlite3VdbeJumpHere(v, addr); - } - if( pPk ){ - sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol); - } + if( pPk ){ + assert( !isQuick ); + sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol); } } + +#ifndef SQLITE_OMIT_VIRTUALTABLE + /* Second pass to invoke the xIntegrity method on all virtual + ** tables. 
+ */ + for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + Table *pTab = sqliteHashData(x); + sqlite3_vtab *pVTab; + int a1; + if( pObjTab && pObjTab!=pTab ) continue; + if( IsOrdinaryTable(pTab) ) continue; + if( !IsVirtual(pTab) ) continue; + if( pTab->nCol<=0 ){ + const char *zMod = pTab->u.vtab.azArg[0]; + if( sqlite3HashFind(&db->aModule, zMod)==0 ) continue; + } + sqlite3ViewGetColumnNames(pParse, pTab); + if( pTab->u.vtab.p==0 ) continue; + pVTab = pTab->u.vtab.p->pVtab; + if( NEVER(pVTab==0) ) continue; + if( NEVER(pVTab->pModule==0) ) continue; + if( pVTab->pModule->iVersion<4 ) continue; + if( pVTab->pModule->xIntegrity==0 ) continue; + sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick); + pTab->nTabRef++; + sqlite3VdbeAppendP4(v, pTab, P4_TABLEREF); + a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, a1); + continue; + } +#endif } { static const int iLn = VDBE_OFFSET_LINENO(2); @@ -140087,44 +141152,63 @@ SQLITE_PRIVATE void sqlite3Pragma( ** ** The optional argument is a bitmask of optimizations to perform: ** - ** 0x0001 Debugging mode. Do not actually perform any optimizations - ** but instead return one line of text for each optimization - ** that would have been done. Off by default. + ** 0x00001 Debugging mode. Do not actually perform any optimizations + ** but instead return one line of text for each optimization + ** that would have been done. Off by default. ** - ** 0x0002 Run ANALYZE on tables that might benefit. On by default. - ** See below for additional information. + ** 0x00002 Run ANALYZE on tables that might benefit. On by default. + ** See below for additional information. ** - ** 0x0004 (Not yet implemented) Record usage and performance - ** information from the current session in the - ** database file so that it will be available to "optimize" - ** pragmas run by future database connections. + ** 0x00010 Run all ANALYZE operations using an analysis_limit that + ** is the lesser of the current analysis_limit and the + ** SQLITE_DEFAULT_OPTIMIZE_LIMIT compile-time option. + ** The default value of SQLITE_DEFAULT_OPTIMIZE_LIMIT is + ** currently (2024-02-19) set to 2000, which is such that + ** the worst case run-time for PRAGMA optimize on a 100MB + ** database will usually be less than 100 milliseconds on + ** a RaspberryPI-4 class machine. On by default. ** - ** 0x0008 (Not yet implemented) Create indexes that might have - ** been helpful to recent queries + ** 0x10000 Look at tables to see if they need to be reanalyzed + ** due to growth or shrinkage even if they have not been + ** queried during the current connection. Off by default. ** - ** The default MASK is and always shall be 0xfffe. 0xfffe means perform all - ** of the optimizations listed above except Debug Mode, including new - ** optimizations that have not yet been invented. If new optimizations are - ** ever added that should be off by default, those off-by-default - ** optimizations will have bitmasks of 0x10000 or larger. + ** The default MASK is and always shall be 0x0fffe. In the current + ** implementation, the default mask only covers the 0x00002 optimization, + ** though additional optimizations that are covered by 0x0fffe might be + ** added in the future. Optimizations that are off by default and must + ** be explicitly requested have masks of 0x10000 or greater.
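Aside: a usage sketch for the PRAGMA optimize mask bits documented above; the mask values are taken directly from that comment block. Rows produced by the debugging bit are discarded here because sqlite3_exec() is given no callback:

#include "sqlite3.h"

static void run_optimize_examples(sqlite3 *db){
  /* Default mask 0x0fffe: today this means the 0x00002 (ANALYZE) behavior,
  ** with the 0x00010 analysis-limit cap applied. */
  sqlite3_exec(db, "PRAGMA optimize;", 0, 0, 0);

  /* 0x00001|0x00002|0x10000: report what would be analyzed, including
  ** tables never queried on this connection, but change nothing. */
  sqlite3_exec(db, "PRAGMA optimize(0x10003);", 0, 0, 0);

  /* 0x00002|0x10000: actually reanalyze any table whose size has drifted,
  ** whether or not it was queried on this connection. */
  sqlite3_exec(db, "PRAGMA optimize(0x10002);", 0, 0, 0);
}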
** ** DETERMINATION OF WHEN TO RUN ANALYZE ** ** In the current implementation, a table is analyzed if and only if all of ** the following are true: ** - ** (1) MASK bit 0x02 is set. + ** (1) MASK bit 0x00002 is set. ** - ** (2) The query planner used sqlite_stat1-style statistics for one or - ** more indexes of the table at some point during the lifetime of - ** the current connection. + ** (2) The table is an ordinary table, not a virtual table or view. ** - ** (3) One or more indexes of the table are currently unanalyzed OR - ** the number of rows in the table has increased by 25 times or more - ** since the last time ANALYZE was run. + ** (3) The table name does not begin with "sqlite_". + ** + ** (4) One or more of the following is true: + ** (4a) The 0x10000 MASK bit is set. + ** (4b) One or more indexes on the table lacks an entry + ** in the sqlite_stat1 table. + ** (4c) The query planner used sqlite_stat1-style statistics for one + ** or more indexes of the table at some point during the lifetime + ** of the current connection. + ** + ** (5) One or more of the following is true: + ** (5a) One or more indexes on the table lacks an entry + ** in the sqlite_stat1 table. (Same as 4b) + ** (5b) The number of rows in the table has increased or decreased by + ** 10-fold. In other words, the current size of the table is + ** 10 times larger than the size in sqlite_stat1 or else the + ** current size is less than 1/10th the size in sqlite_stat1. ** ** The rules for when tables are analyzed are likely to change in - ** future releases. + ** future releases. Future versions of SQLite might accept a string + ** literal argument to this pragma that contains a mnemonic description + ** of the options rather than a bitmap. */ case PragTyp_OPTIMIZE: { int iDbLast; /* Loop termination point for the schema loop */ @@ -140136,6 +141220,10 @@ SQLITE_PRIVATE void sqlite3Pragma( LogEst szThreshold; /* Size threshold above which reanalysis needed */ char *zSubSql; /* SQL statement for the OP_SqlExec opcode */ u32 opMask; /* Mask of operations to perform */ + int nLimit; /* Analysis limit to use */ + int nCheck = 0; /* Number of tables to be optimized */ + int nBtree = 0; /* Number of btrees to scan */ + int nIndex; /* Number of indexes on the current table */ if( zRight ){ opMask = (u32)sqlite3Atoi(zRight); @@ -140143,6 +141231,14 @@ SQLITE_PRIVATE void sqlite3Pragma( }else{ opMask = 0xfffe; } + if( (opMask & 0x10)==0 ){ + nLimit = 0; + }else if( db->nAnalysisLimit>0 + && db->nAnalysisLimit<SQLITE_DEFAULT_OPTIMIZE_LIMIT ){ + nLimit = 0; + }else{ + nLimit = SQLITE_DEFAULT_OPTIMIZE_LIMIT; + } iTabCur = pParse->nTab++; for(iDbLast = zDb?iDb:db->nDb-1; iDb<=iDbLast; iDb++){ if( iDb==1 ) continue; @@ -140151,23 +141247,61 @@ SQLITE_PRIVATE void sqlite3Pragma( for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ pTab = (Table*)sqliteHashData(k); - /* If table pTab has not been used in a way that would benefit from - ** having analysis statistics during the current session, then skip it. - ** This also has the effect of skipping virtual tables and views */ - if( (pTab->tabFlags & TF_StatsUsed)==0 ) continue; + /* This only works for ordinary tables */ + if( !IsOrdinaryTable(pTab) ) continue; + + /* Do not scan system tables */ + if( 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7) ) continue; - /* Reanalyze if the table is 25 times larger than the last analysis */ - szThreshold = pTab->nRowLogEst + 46; assert( sqlite3LogEst(25)==46 ); + /* Find the size of the table as last recorded in sqlite_stat1.
+ ** If any index is unanalyzed, then the threshold is -1 to + ** indicate a new, unanalyzed index + */ + szThreshold = pTab->nRowLogEst; + nIndex = 0; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + nIndex++; if( !pIdx->hasStat1 ){ - szThreshold = 0; /* Always analyze if any index lacks statistics */ - break; + szThreshold = -1; /* Always analyze if any index lacks statistics */ } } - if( szThreshold ){ - sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); - sqlite3VdbeAddOp3(v, OP_IfSmaller, iTabCur, - sqlite3VdbeCurrentAddr(v)+2+(opMask&1), szThreshold); + + /* If table pTab has not been used in a way that would benefit from + ** having analysis statistics during the current session, then skip it, + ** unless the 0x10000 MASK bit is set. */ + if( (pTab->tabFlags & TF_MaybeReanalyze)!=0 ){ + /* Check for size change if stat1 has been used for a query */ + }else if( opMask & 0x10000 ){ + /* Check for size change if 0x10000 is set */ + }else if( pTab->pIndex!=0 && szThreshold<0 ){ + /* Do analysis if unanalyzed indexes exist */ + }else{ + /* Otherwise, we can skip this table */ + continue; + } + + nCheck++; + if( nCheck==2 ){ + /* If ANALYZE might be invoked two or more times, hold a write + ** transaction for efficiency */ + sqlite3BeginWriteOperation(pParse, 0, iDb); + } + nBtree += nIndex+1; + + /* Reanalyze if the table is 10 times larger or smaller than + ** the last analysis. Unconditional reanalysis if there are + ** unanalyzed indexes. */ + sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); + if( szThreshold>=0 ){ + const LogEst iRange = 33; /* 10x size change */ + sqlite3VdbeAddOp4Int(v, OP_IfSizeBetween, iTabCur, + sqlite3VdbeCurrentAddr(v)+2+(opMask&1), + szThreshold>=iRange ? szThreshold-iRange : -1, + szThreshold+iRange); + VdbeCoverage(v); + }else{ + sqlite3VdbeAddOp2(v, OP_Rewind, iTabCur, + sqlite3VdbeCurrentAddr(v)+2+(opMask&1)); VdbeCoverage(v); } zSubSql = sqlite3MPrintf(db, "ANALYZE \"%w\".\"%w\"", @@ -140177,11 +141311,27 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3VdbeAddOp4(v, OP_String8, 0, r1, 0, zSubSql, P4_DYNAMIC); sqlite3VdbeAddOp2(v, OP_ResultRow, r1, 1); }else{ - sqlite3VdbeAddOp4(v, OP_SqlExec, 0, 0, 0, zSubSql, P4_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_SqlExec, nLimit ? 0x02 : 00, nLimit, 0, + zSubSql, P4_DYNAMIC); } } } sqlite3VdbeAddOp0(v, OP_Expire); + + /* In a schema with a large number of tables and indexes, scale back + ** the analysis_limit to avoid excess run-time in the worst case.
+ */ + if( !db->mallocFailed && nLimit>0 && nBtree>100 ){ + int iAddr, iEnd; + VdbeOp *aOp; + nLimit = 100*nLimit/nBtree; + if( nLimit<100 ) nLimit = 100; + aOp = sqlite3VdbeGetOp(v, 0); + iEnd = sqlite3VdbeCurrentAddr(v); + for(iAddr=0; iAddrnConstraint; i++, pConstraint++){ - if( pConstraint->usable==0 ) continue; - if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; if( pConstraint->iColumn < pTab->iHidden ) continue; + if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; + if( pConstraint->usable==0 ) return SQLITE_CONSTRAINT; j = pConstraint->iColumn - pTab->iHidden; assert( j < 2 ); seen[j] = i+1; @@ -140460,12 +141610,13 @@ static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ j = seen[0]-1; pIdxInfo->aConstraintUsage[j].argvIndex = 1; pIdxInfo->aConstraintUsage[j].omit = 1; - if( seen[1]==0 ) return SQLITE_OK; pIdxInfo->estimatedCost = (double)20; pIdxInfo->estimatedRows = 20; - j = seen[1]-1; - pIdxInfo->aConstraintUsage[j].argvIndex = 2; - pIdxInfo->aConstraintUsage[j].omit = 1; + if( seen[1] ){ + j = seen[1]-1; + pIdxInfo->aConstraintUsage[j].argvIndex = 2; + pIdxInfo->aConstraintUsage[j].omit = 1; + } return SQLITE_OK; } @@ -140485,6 +141636,7 @@ static void pragmaVtabCursorClear(PragmaVtabCursor *pCsr){ int i; sqlite3_finalize(pCsr->pPragma); pCsr->pPragma = 0; + pCsr->iRowid = 0; for(i=0; iazArg); i++){ sqlite3_free(pCsr->azArg[i]); pCsr->azArg[i] = 0; @@ -141285,7 +142437,13 @@ SQLITE_PRIVATE void *sqlite3ParserAddCleanup( void (*xCleanup)(sqlite3*,void*), /* The cleanup routine */ void *pPtr /* Pointer to object to be cleaned up */ ){ - ParseCleanup *pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup)); + ParseCleanup *pCleanup; + if( sqlite3FaultSim(300) ){ + pCleanup = 0; + sqlite3OomFault(pParse->db); + }else{ + pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup)); + } if( pCleanup ){ pCleanup->pNext = pParse->pCleanup; pParse->pCleanup = pCleanup; @@ -143407,9 +144565,16 @@ static void generateSortTail( int addrExplain; /* Address of OP_Explain instruction */ #endif - ExplainQueryPlan2(addrExplain, (pParse, 0, - "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat>0?"RIGHT PART OF ":"") - ); + nKey = pOrderBy->nExpr - pSort->nOBSat; + if( pSort->nOBSat==0 || nKey==1 ){ + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat?"LAST TERM OF ":"" + )); + }else{ + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR LAST %d TERMS OF ORDER BY", nKey + )); + } sqlite3VdbeScanStatusRange(v, addrExplain,pSort->addrPush,pSort->addrPushEnd); sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, pSort->addrPush); @@ -143447,7 +144612,6 @@ static void generateSortTail( regRow = sqlite3GetTempRange(pParse, nColumn); } } - nKey = pOrderBy->nExpr - pSort->nOBSat; if( pSort->sortFlags & SORTFLAG_UseSorter ){ int regSortOut = ++pParse->nMem; iSortTab = pParse->nTab++; @@ -143687,11 +144851,7 @@ static const char *columnTypeImpl( ** data for the result-set column of the sub-select. */ if( iColpEList->nExpr -#ifdef SQLITE_ALLOW_ROWID_IN_VIEW - && iCol>=0 -#else - && ALWAYS(iCol>=0) -#endif + && (!ViewCanHaveRowid || iCol>=0) ){ /* If iCol is less than zero, then the expression requests the ** rowid of the sub-select or view. 
This expression is legal (see @@ -144056,8 +145216,7 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( NameContext sNC; assert( pSelect!=0 ); - testcase( (pSelect->selFlags & SF_Resolved)==0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT ); + assert( (pSelect->selFlags & SF_Resolved)!=0 ); assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); if( db->mallocFailed || IN_RENAME_OBJECT ) return; @@ -144068,17 +145227,22 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){ const char *zType; i64 n; + int m = 0; + Select *pS2 = pSelect; pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; /* pCol->szEst = ... // Column size est for SELECT tables never used */ pCol->affinity = sqlite3ExprAffinity(p); + while( pCol->affinity<=SQLITE_AFF_NONE && pS2->pNext!=0 ){ + m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); + pS2 = pS2->pNext; + pCol->affinity = sqlite3ExprAffinity(pS2->pEList->a[i].pExpr); + } if( pCol->affinity<=SQLITE_AFF_NONE ){ pCol->affinity = aff; } - if( pCol->affinity>=SQLITE_AFF_TEXT && pSelect->pNext ){ - int m = 0; - Select *pS2; - for(m=0, pS2=pSelect->pNext; pS2; pS2=pS2->pNext){ + if( pCol->affinity>=SQLITE_AFF_TEXT && (pS2->pNext || pS2!=pSelect) ){ + for(pS2=pS2->pNext; pS2; pS2=pS2->pNext){ m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); } if( pCol->affinity==SQLITE_AFF_TEXT && (m&0x01)!=0 ){ @@ -144108,12 +145272,12 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( } } if( zType ){ - i64 m = sqlite3Strlen30(zType); + const i64 k = sqlite3Strlen30(zType); n = sqlite3Strlen30(pCol->zCnName); - pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); + pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+k+2); pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); if( pCol->zCnName ){ - memcpy(&pCol->zCnName[n+1], zType, m+1); + memcpy(&pCol->zCnName[n+1], zType, k+1); pCol->colFlags |= COLFLAG_HASTYPE; } } @@ -146510,7 +147674,7 @@ static void constInsert( ){ int i; assert( pColumn->op==TK_COLUMN ); - assert( sqlite3ExprIsConstant(pValue) ); + assert( sqlite3ExprIsConstant(pConst->pParse, pValue) ); if( ExprHasProperty(pColumn, EP_FixedCol) ) return; if( sqlite3ExprAffinity(pValue)!=0 ) return; @@ -146568,10 +147732,10 @@ static void findConstInWhere(WhereConst *pConst, Expr *pExpr){ pLeft = pExpr->pLeft; assert( pRight!=0 ); assert( pLeft!=0 ); - if( pRight->op==TK_COLUMN && sqlite3ExprIsConstant(pLeft) ){ + if( pRight->op==TK_COLUMN && sqlite3ExprIsConstant(pConst->pParse, pLeft) ){ constInsert(pConst,pRight,pLeft,pExpr); } - if( pLeft->op==TK_COLUMN && sqlite3ExprIsConstant(pRight) ){ + if( pLeft->op==TK_COLUMN && sqlite3ExprIsConstant(pConst->pParse, pRight) ){ constInsert(pConst,pLeft,pRight,pExpr); } } @@ -146792,6 +147956,18 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** The hope is that the terms added to the inner query will make it more ** efficient. ** +** NAME AMBIGUITY +** +** This optimization is called the "WHERE-clause push-down optimization". +** +** Do not confuse this optimization with another unrelated optimization +** with a similar name: The "MySQL push-down optimization" causes WHERE +** clause terms that can be evaluated using only the index and without +** reference to the table to be run first, so that if they are false, +** unnecessary table seeks are avoided.
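Aside: the WHERE-clause push-down (as distinct from the MySQL push-down just described) can be observed with EXPLAIN QUERY PLAN. A sketch against this vendored build; the expectation, not guaranteed across versions, is that the a=5 term is applied inside each arm of the compound subquery rather than against its materialized output:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t1(a INTEGER PRIMARY KEY, b);"
    "CREATE TABLE t2(a INTEGER PRIMARY KEY, b);", 0, 0, 0);
  sqlite3_prepare_v2(db,
    "EXPLAIN QUERY PLAN SELECT * FROM "
    "(SELECT a,b FROM t1 UNION ALL SELECT a,b FROM t2) WHERE a=5;",
    -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 3)); /* detail */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}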
+** +** RULES +** ** Do not attempt this optimization if: ** ** (1) (** This restriction was removed on 2017-09-29. We used to @@ -146857,15 +148033,19 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** (9c) There is a RIGHT JOIN (or FULL JOIN) in between the ON/USING ** clause and the subquery. ** -** Without this restriction, the push-down optimization might move -** the ON/USING filter expression from the left side of a RIGHT JOIN -** over to the right side, which leads to incorrect answers. See -** also restriction (6) in sqlite3ExprIsSingleTableConstraint(). +** Without this restriction, the WHERE-clause push-down optimization +** might move the ON/USING filter expression from the left side of a +** RIGHT JOIN over to the right side, which leads to incorrect answers. +** See also restriction (6) in sqlite3ExprIsSingleTableConstraint(). ** ** (10) The inner query is not the right-hand table of a RIGHT JOIN. ** ** (11) The subquery is not a VALUES clause ** +** (12) The WHERE clause is not "rowid ISNULL" or the equivalent. This +** case only comes up if SQLite is compiled using +** SQLITE_ALLOW_ROWID_IN_VIEW. +** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. */ @@ -146976,7 +148156,19 @@ static int pushDownWhereTerms( } #endif - if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc) ){ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + if( ViewCanHaveRowid && (pWhere->op==TK_ISNULL || pWhere->op==TK_NOTNULL) ){ + Expr *pLeft = pWhere->pLeft; + if( ALWAYS(pLeft) + && pLeft->op==TK_COLUMN + && pLeft->iColumn < 0 + ){ + return 0; /* Restriction (12) */ + } + } +#endif + + if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc, 1) ){ nChng++; pSubq->selFlags |= SF_PushDown; while( pSubq ){ @@ -147603,12 +148795,14 @@ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ while( pSel->pPrior ){ pSel = pSel->pPrior; } sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol); pTab->iPKey = -1; + pTab->eTabType = TABTYP_VIEW; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); #ifndef SQLITE_ALLOW_ROWID_IN_VIEW /* The usual case - do not allow ROWID on a subquery */ pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; #else - pTab->tabFlags |= TF_Ephemeral; /* Legacy compatibility mode */ + /* Legacy compatibility mode */ + pTab->tabFlags |= TF_Ephemeral | sqlite3Config.mNoVisibleRowid; #endif return pParse->nErr ? 
SQLITE_ERROR : SQLITE_OK; } @@ -147876,7 +149070,7 @@ static int selectExpander(Walker *pWalker, Select *p){ pNestedFrom = pFrom->pSelect->pEList; assert( pNestedFrom!=0 ); assert( pNestedFrom->nExpr==pTab->nCol ); - assert( VisibleRowid(pTab)==0 ); + assert( VisibleRowid(pTab)==0 || ViewCanHaveRowid ); }else{ if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){ continue; } @@ -147908,7 +149102,8 @@ static int selectExpander(Walker *pWalker, Select *p){ pUsing = 0; } - nAdd = pTab->nCol + (VisibleRowid(pTab) && (selFlags&SF_NestedFrom)); + nAdd = pTab->nCol; + if( VisibleRowid(pTab) && (selFlags & SF_NestedFrom)!=0 ) nAdd++; for(j=0; ja[pNew->nExpr-1]; assert( pX->zEName==0 ); if( (selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){ - if( pNestedFrom ){ + if( pNestedFrom && (!ViewCanHaveRowid || j<pNestedFrom->nExpr) ){ + assert( j<pNestedFrom->nExpr ); pX->zEName = sqlite3DbStrDup(db, pNestedFrom->a[j].zEName); testcase( pX->zEName==0 ); }else{ @@ -148107,8 +149303,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ if( p->selFlags & SF_HasTypeInfo ) return; p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; - testcase( (p->selFlags & SF_Resolved)==0 ); - assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT ); + assert( (p->selFlags & SF_Resolved) ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; @@ -148178,6 +149373,8 @@ SQLITE_PRIVATE void sqlite3SelectPrep( */ static void printAggInfo(AggInfo *pAggInfo){ int ii; + sqlite3DebugPrintf("AggInfo %d/%p:\n", + pAggInfo->selId, pAggInfo); for(ii=0; ii<pAggInfo->nColumn; ii++){ struct AggInfo_col *pCol = &pAggInfo->aCol[ii]; sqlite3DebugPrintf( @@ -149368,7 +150565,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Generate code for all sub-queries in the FROM clause */ pSub = pItem->pSelect; - if( pSub==0 ) continue; + if( pSub==0 || pItem->addrFillSub!=0 ) continue; /* The code for a subquery should only be generated once. */ assert( pItem->addrFillSub==0 ); @@ -149399,7 +150596,7 @@ SQLITE_PRIVATE int sqlite3Select( #endif assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); }else{ - TREETRACE(0x4000,pParse,p,("Push-down not possible\n")); + TREETRACE(0x4000,pParse,p,("WHERE-clause push-down not possible\n")); } /* Convert unused result columns of the subquery into simple NULL @@ -150280,6 +151477,12 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3ExprListDelete(db, pMinMaxOrderBy); #ifdef SQLITE_DEBUG if( pAggInfo && !db->mallocFailed ){ +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x20 ){ + TREETRACE(0x20,pParse,p,("Finished with AggInfo\n")); + printAggInfo(pAggInfo); + } +#endif for(i=0; i<pAggInfo->nColumn; i++){ Expr *pExpr = pAggInfo->aCol[i].pCExpr; if( pExpr==0 ) continue; @@ -151461,6 +152664,72 @@ static ExprList *sqlite3ExpandReturning( return pNew; } +/* If the Expr node is a subquery or an EXISTS operator or an IN operator that ** uses a subquery, and if the subquery is SF_Correlated, then mark the ** expression as EP_VarSelect. +*/ +static int sqlite3ReturningSubqueryVarSelect(Walker *NotUsed, Expr *pExpr){ + UNUSED_PARAMETER(NotUsed); + if( ExprUseXSelect(pExpr) + && (pExpr->x.pSelect->selFlags & SF_Correlated)!=0 + ){ + testcase( ExprHasProperty(pExpr, EP_VarSelect) ); + ExprSetProperty(pExpr, EP_VarSelect); + } + return WRC_Continue; +} + + +/* +** If the SELECT references the table pWalker->u.pTab, then do two things: +** +** (1) Mark the SELECT as SF_Correlated. +** (2) Set pWalker->eCode to non-zero so that the caller will know +** that (1) has happened.
+*/ +static int sqlite3ReturningSubqueryCorrelated(Walker *pWalker, Select *pSelect){ + int i; + SrcList *pSrc; + assert( pSelect!=0 ); + pSrc = pSelect->pSrc; + assert( pSrc!=0 ); + for(i=0; inSrc; i++){ + if( pSrc->a[i].pTab==pWalker->u.pTab ){ + testcase( pSelect->selFlags & SF_Correlated ); + pSelect->selFlags |= SF_Correlated; + pWalker->eCode = 1; + break; + } + } + return WRC_Continue; +} + +/* +** Scan the expression list that is the argument to RETURNING looking +** for subqueries that depend on the table which is being modified in the +** statement that is hosting the RETURNING clause (pTab). Mark all such +** subqueries as SF_Correlated. If the subqueries are part of an +** expression, mark the expression as EP_VarSelect. +** +** https://sqlite.org/forum/forumpost/2c83569ce8945d39 +*/ +static void sqlite3ProcessReturningSubqueries( + ExprList *pEList, + Table *pTab +){ + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = sqlite3ExprWalkNoop; + w.xSelectCallback = sqlite3ReturningSubqueryCorrelated; + w.u.pTab = pTab; + sqlite3WalkExprList(&w, pEList); + if( w.eCode ){ + w.xExprCallback = sqlite3ReturningSubqueryVarSelect; + w.xSelectCallback = sqlite3SelectWalkNoop; + sqlite3WalkExprList(&w, pEList); + } +} + /* ** Generate code for the RETURNING trigger. Unlike other triggers ** that invoke a subprogram in the bytecode, the code for RETURNING @@ -151497,6 +152766,7 @@ static void codeReturningTrigger( sSelect.pSrc = &sFrom; sFrom.nSrc = 1; sFrom.a[0].pTab = pTab; + sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */ sFrom.a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); if( pParse->nErr==0 ){ @@ -151523,6 +152793,7 @@ static void codeReturningTrigger( int i; int nCol = pNew->nExpr; int reg = pParse->nMem+1; + sqlite3ProcessReturningSubqueries(pNew, pTab); pParse->nMem += nCol+2; pReturning->iRetReg = reg; for(i=0; ipUpsertIdx = pIdx; + if( sqlite3UpsertOfIndex(pAll,pIdx)!=pUpsert ){ + /* Really this should be an error. The isDup ON CONFLICT clause will + ** never fire. But this problem was not discovered until three years + ** after multi-CONFLICT upsert was added, and so we silently ignore + ** the problem to prevent breaking applications that might actually + ** have redundant ON CONFLICT clauses. 
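Aside: an illustration of the redundant-target case that the isDup flag above tolerates — both ON CONFLICT clauses resolve to the same unique index, so the second clause can never fire, yet the statement is still accepted. Table and column names are illustrative only:

#include "sqlite3.h"

static void redundant_upsert(sqlite3 *db){
  sqlite3_exec(db,
    "CREATE TABLE t1(a INT UNIQUE, b);"
    "INSERT INTO t1 VALUES(1,'old');"
    "INSERT INTO t1 VALUES(1,'new') "
    "  ON CONFLICT(a) DO UPDATE SET b='first-clause' "
    "  ON CONFLICT(a) DO NOTHING;",   /* isDup: silently ignored */
    0, 0, 0);
}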
*/ + pUpsert->isDup = 1; + } break; } if( pUpsert->pUpsertIdx==0 ){ @@ -153590,9 +154873,13 @@ SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert *pUpsert){ Upsert *pNext; if( NEVER(pUpsert==0) ) return 0; pNext = pUpsert->pNextUpsert; - if( pNext==0 ) return 1; - if( pNext->pUpsertTarget==0 ) return 1; - if( pNext->pUpsertIdx==0 ) return 1; + while( 1 /*exit-by-return*/ ){ + if( pNext==0 ) return 1; + if( pNext->pUpsertTarget==0 ) return 1; + if( pNext->pUpsertIdx==0 ) return 1; + if( !pNext->isDup ) return 0; + pNext = pNext->pNextUpsert; + } return 0; } @@ -154717,6 +156004,8 @@ static int vtabCallConstructor( db->pVtabCtx = &sCtx; pTab->nTabRef++; rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr); + assert( pTab!=0 ); + assert( pTab->nTabRef>1 || rc!=SQLITE_OK ); sqlite3DeleteTable(db, pTab); db->pVtabCtx = sCtx.pPrior; if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); @@ -154739,7 +156028,7 @@ static int vtabCallConstructor( pVTable->nRef = 1; if( sCtx.bDeclared==0 ){ const char *zFormat = "vtable constructor did not declare schema: %s"; - *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName); + *pzErr = sqlite3MPrintf(db, zFormat, zModuleName); sqlite3VtabUnlock(pVTable); rc = SQLITE_ERROR; }else{ @@ -154917,12 +156206,30 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pTab; Parse sParse; int initBusy; + int i; + const unsigned char *z; + static const u8 aKeyword[] = { TK_CREATE, TK_TABLE, 0 }; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){ return SQLITE_MISUSE_BKPT; } #endif + + /* Verify that the first two keywords in the CREATE TABLE statement + ** really are "CREATE" and "TABLE". If this is not the case, then + ** sqlite3_declare_vtab() is being misused. + */ + z = (const unsigned char*)zCreateTable; + for(i=0; aKeyword[i]; i++){ + int tokenType = 0; + do{ z += sqlite3GetToken(z, &tokenType); }while( tokenType==TK_SPACE ); + if( tokenType!=aKeyword[i] ){ + sqlite3ErrorWithMsg(db, SQLITE_ERROR, "syntax error"); + return SQLITE_ERROR; + } + } + sqlite3_mutex_enter(db->mutex); pCtx = db->pVtabCtx; if( !pCtx || pCtx->bDeclared ){ @@ -154930,6 +156237,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ sqlite3_mutex_leave(db->mutex); return SQLITE_MISUSE_BKPT; } + pTab = pCtx->pTab; assert( IsVirtual(pTab) ); @@ -154943,11 +156251,10 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ initBusy = db->init.busy; db->init.busy = 0; sParse.nQueryLoop = 1; - if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable) - && ALWAYS(sParse.pNewTable!=0) - && ALWAYS(!db->mallocFailed) - && IsOrdinaryTable(sParse.pNewTable) - ){ + if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable) ){ + assert( sParse.pNewTable!=0 ); + assert( !db->mallocFailed ); + assert( IsOrdinaryTable(sParse.pNewTable) ); assert( sParse.zErrMsg==0 ); if( !pTab->aCol ){ Table *pNew = sParse.pNewTable; @@ -157442,6 +158749,27 @@ static SQLITE_NOINLINE void filterPullDown( } } +/* +** Loop pLoop is a WHERE_INDEXED level that uses at least one IN(...) +** operator. Return true if level pLoop is guaranteed to visit only one +** row for each key generated for the index. 
+*/ +static int whereLoopIsOneRow(WhereLoop *pLoop){ + if( pLoop->u.btree.pIndex->onError + && pLoop->nSkip==0 + && pLoop->u.btree.nEq==pLoop->u.btree.pIndex->nKeyCol + ){ + int ii; + for(ii=0; ii<pLoop->u.btree.nEq; ii++){ + if( pLoop->aLTerm[ii]->eOperator & (WO_IS|WO_ISNULL) ){ + return 0; + } + } + return 1; + } + return 0; +} + /* ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. @@ -157520,7 +158848,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){ pLevel->iLeftJoin = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin); - VdbeComment((v, "init LEFT JOIN no-match flag")); + VdbeComment((v, "init LEFT JOIN match flag")); } /* Compute a safe address to jump to if we discover that the table for @@ -158189,7 +159517,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } /* Record the instruction used to terminate the loop. */ - if( pLoop->wsFlags & WHERE_ONEROW ){ + if( (pLoop->wsFlags & WHERE_ONEROW) + || (pLevel->u.in.nIn && regBignull==0 && whereLoopIsOneRow(pLoop)) + ){ pLevel->op = OP_Noop; }else if( bRev ){ pLevel->op = OP_Prev; @@ -158579,6 +159909,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** iLoop==3: Code all remaining expressions. ** ** An effort is made to skip unnecessary iterations of the loop. + ** + ** This optimization of causing simple query restrictions to occur before + ** more complex ones is called the "push-down" optimization in MySQL. Here + ** in SQLite, the name is "MySQL push-down", since there is also another + ** totally unrelated optimization called "WHERE-clause push-down". + ** Sometimes the qualifier is omitted, resulting in an ambiguity, so beware. */ iLoop = (pIdx ? 1 : 2); do{ @@ -158829,7 +160165,16 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( pRJ->regReturn); for(k=0; k<iLevel; k++){ int iIdxCur; + SrcItem *pRight; assert( pWInfo->a[k].pWLoop->iTab == pWInfo->a[k].iFrom ); + pRight = &pWInfo->pTabList->a[pWInfo->a[k].iFrom]; mAll |= pWInfo->a[k].pWLoop->maskSelf; + if( pRight->fg.viaCoroutine ){ + sqlite3VdbeAddOp3( + v, OP_Null, 0, pRight->regResult, + pRight->regResult + pRight->pSelect->pEList->nExpr-1 + ); + } sqlite3VdbeAddOp1(v, OP_NullRow, pWInfo->a[k].iTabCur); iIdxCur = pWInfo->a[k].iIdxCur; if( iIdxCur ){ @@ -159886,7 +161231,7 @@ static SQLITE_NOINLINE int exprMightBeIndexed2( if( pIdx->aiColumn[i]!=XN_EXPR ) continue; assert( pIdx->bHasExpr ); if( sqlite3ExprCompareSkip(pExpr,pIdx->aColExpr->a[i].pExpr,iCur)==0 - && pExpr->op!=TK_STRING + && !sqlite3ExprIsConstant(0,pIdx->aColExpr->a[i].pExpr) ){ aiCurCol[0] = iCur; aiCurCol[1] = XN_EXPR; @@ -160535,6 +161880,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec continue; } if( pWC->a[ii].leftCursor!=iCsr ) return; + if( pWC->a[ii].prereqRight!=0 ) return; } /* Check condition (5). Return early if it is not met. */ @@ -160549,12 +161895,14 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec /* All conditions are met. Add the terms to the where-clause object.
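Aside: a query shape that should qualify for whereLoopIsOneRow() above — a UNIQUE column fully constrained by an IN term, with no skip-scan and no IS/ISNULL operators, so the per-key scan step can become OP_Noop. A sketch that lists the bytecode so the absence of a Next/Prev loop step can be checked by eye (the EXPLAIN output format is unofficial and may change):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(u UNIQUE, v);", 0, 0, 0);
  sqlite3_prepare_v2(db,
    "EXPLAIN SELECT v FROM t1 WHERE u IN (1,2,3);", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 1)); /* opcode */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}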
*/ assert( p->pLimit->op==TK_LIMIT ); - whereAddLimitExpr(pWC, p->iLimit, p->pLimit->pLeft, - iCsr, SQLITE_INDEX_CONSTRAINT_LIMIT); - if( p->iOffset>0 ){ + if( p->iOffset!=0 && (p->selFlags & SF_Compound)==0 ){ whereAddLimitExpr(pWC, p->iOffset, p->pLimit->pRight, iCsr, SQLITE_INDEX_CONSTRAINT_OFFSET); } + if( p->iOffset==0 || (p->selFlags & SF_Compound)==0 ){ + whereAddLimitExpr(pWC, p->iLimit, p->pLimit->pLeft, + iCsr, SQLITE_INDEX_CONSTRAINT_LIMIT); + } } } @@ -161072,6 +162420,42 @@ static Expr *whereRightSubexprIsColumn(Expr *p){ return 0; } +/* +** Term pTerm is guaranteed to be a WO_IN term. It may be a component term +** of a vector IN expression of the form "(x, y, ...) IN (SELECT ...)". +** This function checks to see if the term is compatible with an index +** column with affinity idxaff (one of the SQLITE_AFF_XYZ values). If so, +** it returns a pointer to the name of the collation sequence (e.g. "BINARY" +** or "NOCASE") used by the comparison in pTerm. If it is not compatible +** with affinity idxaff, NULL is returned. +*/ +static SQLITE_NOINLINE const char *indexInAffinityOk( + Parse *pParse, + WhereTerm *pTerm, + u8 idxaff +){ + Expr *pX = pTerm->pExpr; + Expr inexpr; + + assert( pTerm->eOperator & WO_IN ); + + if( sqlite3ExprIsVector(pX->pLeft) ){ + int iField = pTerm->u.x.iField - 1; + inexpr.flags = 0; + inexpr.op = TK_EQ; + inexpr.pLeft = pX->pLeft->x.pList->a[iField].pExpr; + assert( ExprUseXSelect(pX) ); + inexpr.pRight = pX->x.pSelect->pEList->a[iField].pExpr; + pX = &inexpr; + } + + if( sqlite3IndexAffinityOk(pX, idxaff) ){ + CollSeq *pRet = sqlite3ExprCompareCollSeq(pParse, pX); + return pRet ? pRet->zName : sqlite3StrBINARY; + } + return 0; +} + /* ** Advance to the next WhereTerm that matches according to the criteria ** established when the pScan object was initialized by whereScanInit(). @@ -161122,16 +162506,24 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ if( (pTerm->eOperator & pScan->opMask)!=0 ){ /* Verify the affinity and collating sequence match */ if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){ - CollSeq *pColl; + const char *zCollName; Parse *pParse = pWC->pWInfo->pParse; pX = pTerm->pExpr; - if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ - continue; + + if( (pTerm->eOperator & WO_IN) ){ + zCollName = indexInAffinityOk(pParse, pTerm, pScan->idxaff); + if( !zCollName ) continue; + }else{ + CollSeq *pColl; + if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ + continue; + } + assert(pX->pLeft); + pColl = sqlite3ExprCompareCollSeq(pParse, pX); + zCollName = pColl ? pColl->zName : sqlite3StrBINARY; } - assert(pX->pLeft); - pColl = sqlite3ExprCompareCollSeq(pParse, pX); - if( pColl==0 ) pColl = pParse->db->pDfltColl; - if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){ + + if( sqlite3StrICmp(zCollName, pScan->zCollName) ){ continue; } } @@ -161483,9 +162875,13 @@ static void translateColumnToCopy( ** are no-ops. 
*/ #if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED) -static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ +static void whereTraceIndexInfoInputs( + sqlite3_index_info *p, /* The IndexInfo object */ + Table *pTab /* The TABLE that is the virtual table */ +){ int i; if( (sqlite3WhereTrace & 0x10)==0 ) return; + sqlite3DebugPrintf("sqlite3_index_info inputs for %s:\n", pTab->zName); for(i=0; inConstraint; i++){ sqlite3DebugPrintf( " constraint[%d]: col=%d termid=%d op=%d usabled=%d collseq=%s\n", @@ -161503,9 +162899,13 @@ static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ p->aOrderBy[i].desc); } } -static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ +static void whereTraceIndexInfoOutputs( + sqlite3_index_info *p, /* The IndexInfo object */ + Table *pTab /* The TABLE that is the virtual table */ +){ int i; if( (sqlite3WhereTrace & 0x10)==0 ) return; + sqlite3DebugPrintf("sqlite3_index_info outputs for %s:\n", pTab->zName); for(i=0; inConstraint; i++){ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", i, @@ -161519,8 +162919,8 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ sqlite3DebugPrintf(" estimatedRows=%lld\n", p->estimatedRows); } #else -#define whereTraceIndexInfoInputs(A) -#define whereTraceIndexInfoOutputs(A) +#define whereTraceIndexInfoInputs(A,B) +#define whereTraceIndexInfoOutputs(A,B) #endif /* @@ -161704,7 +163104,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** WHERE clause (or the ON clause of a LEFT join) that constrain which ** rows of the target table (pSrc) that can be used. */ if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom, 0) ){ pPartial = sqlite3ExprAnd(pParse, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0)); @@ -161746,7 +163146,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** if they go out of sync. 
*/ if( IsView(pTable) ){ - extraCols = ALLBITS; + extraCols = ALLBITS & ~idxCols; }else{ extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); } @@ -161973,7 +163373,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( for(pTerm=pWInfo->sWC.a; pTermpExpr; if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc, 0) ){ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); } @@ -162099,7 +163499,7 @@ static sqlite3_index_info *allocateIndexInfo( Expr *pE2; /* Skip over constant terms in the ORDER BY clause */ - if( sqlite3ExprIsConstant(pExpr) ){ + if( sqlite3ExprIsConstant(0, pExpr) ){ continue; } @@ -162134,7 +163534,7 @@ static sqlite3_index_info *allocateIndexInfo( } if( i==n ){ nOrderBy = n; - if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) ){ + if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) && !pSrc->fg.rowidUsed ){ eDistinct = 2 + ((pWInfo->wctrlFlags & WHERE_SORTBYGROUP)!=0); }else if( pWInfo->wctrlFlags & WHERE_GROUPBY ){ eDistinct = 1; @@ -162211,7 +163611,7 @@ static sqlite3_index_info *allocateIndexInfo( pIdxInfo->nConstraint = j; for(i=j=0; ia[i].pExpr; - if( sqlite3ExprIsConstant(pExpr) ) continue; + if( sqlite3ExprIsConstant(0, pExpr) ) continue; assert( pExpr->op==TK_COLUMN || (pExpr->op==TK_COLLATE && pExpr->pLeft->op==TK_COLUMN && pExpr->iColumn==pExpr->pLeft->iColumn) ); @@ -162263,11 +163663,11 @@ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; int rc; - whereTraceIndexInfoInputs(p); + whereTraceIndexInfoInputs(p, pTab); pParse->db->nSchemaLock++; rc = pVtab->pModule->xBestIndex(pVtab, p); pParse->db->nSchemaLock--; - whereTraceIndexInfoOutputs(p); + whereTraceIndexInfoOutputs(p, pTab); if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT ){ if( rc==SQLITE_NOMEM ){ @@ -163745,7 +165145,9 @@ static int whereLoopAddBtreeIndex( } if( pProbe->bUnordered || pProbe->bLowQual ){ if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); - if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS); + if( pProbe->bLowQual && pSrc->fg.isIndexedBy==0 ){ + opMask &= ~(WO_EQ|WO_IN|WO_IS); + } } assert( pNew->u.btree.nEqnColumn ); @@ -164012,10 +165414,13 @@ static int whereLoopAddBtreeIndex( } } - /* Set rCostIdx to the cost of visiting selected rows in index. Add - ** it to pNew->rRun, which is currently set to the cost of the index - ** seek only. Then, if this is a non-covering index, add the cost of - ** visiting the rows in the main table. */ + /* Set rCostIdx to the estimated cost of visiting selected rows in the + ** index. The estimate is the sum of two values: + ** 1. The cost of doing one search-by-key to find the first matching + ** entry + ** 2. Stepping forward in the index pNew->nOut times to find all + ** additional matching entries. + */ assert( pSrc->pTab->szTabRow>0 ); if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ /* The pProbe->szIdxRow is low for an IPK table since the interior @@ -164026,7 +165431,15 @@ static int whereLoopAddBtreeIndex( }else{ rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; } - pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); + rCostIdx = sqlite3LogEstAdd(rLogSize, rCostIdx); + + /* Estimate the cost of running the loop. If all data is coming + ** from the index, then this is just the cost of doing the index + ** lookup and scan. 
+    ** But if some data is coming out of the main table, we also have to
+    ** add in the cost of doing pNew->nOut searches to locate the row in
+    ** the main table that corresponds to the index entry.
+    */
+    pNew->rRun = rCostIdx;
     if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK|WHERE_EXPRIDX))==0 ){
       pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16);
     }
@@ -164132,7 +165545,9 @@ static int indexMightHelpWithOrderBy(
   for(ii=0; ii<pOB->nExpr; ii++){
     Expr *pExpr = sqlite3ExprSkipCollateAndLikely(pOB->a[ii].pExpr);
     if( NEVER(pExpr==0) ) continue;
-    if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){
+    if( (pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN)
+     && pExpr->iTable==iCursor
+    ){
       if( pExpr->iColumn<0 ) return 1;
       for(jj=0; jj<pIndex->nKeyCol; jj++){
         if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1;
@@ -164389,7 +165804,7 @@ static void wherePartIdxExpr(
   u8 aff;
 
   if( pLeft->op!=TK_COLUMN ) return;
-  if( !sqlite3ExprIsConstant(pRight) ) return;
+  if( !sqlite3ExprIsConstant(0, pRight) ) return;
   if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pParse, pPart)) ) return;
   if( pLeft->iColumn<0 ) return;
   aff = pIdx->pTable->aCol[pLeft->iColumn].affinity;
@@ -164662,7 +166077,9 @@ static int whereLoopAddBtree(
                      " according to whereIsCoveringIndex()\n", pProbe->zName));
         }
       }
-    }else if( m==0 ){
+    }else if( m==0
+     && (HasRowid(pTab) || pWInfo->pSelect!=0 || sqlite3FaultSim(700))
+    ){
       WHERETRACE(0x200,
          ("-> %s a covering index according to bitmasks\n",
          pProbe->zName, m==0 ? "is" : "is not"));
@@ -164738,7 +166155,7 @@ static int whereLoopAddBtree(
       ** unique index is used (making the index functionally non-unique)
       ** then the sqlite_stat1 data becomes important for scoring the
       ** plan */
-      pTab->tabFlags |= TF_StatsUsed;
+      pTab->tabFlags |= TF_MaybeReanalyze;
     }
 #ifdef SQLITE_ENABLE_STAT4
     sqlite3Stat4ProbeFree(pBuilder->pRec);
@@ -164760,6 +166177,21 @@ static int isLimitTerm(WhereTerm *pTerm){
       && pTerm->eMatchOp<=SQLITE_INDEX_CONSTRAINT_OFFSET;
 }
 
+/*
+** Return true if the first nCons constraints in the pUsage array are
+** marked as in-use (have argvIndex>0). False otherwise.
+*/
+static int allConstraintsUsed(
+  struct sqlite3_index_constraint_usage *aUsage,
+  int nCons
+){
+  int ii;
+  for(ii=0; ii<nCons; ii++){
+    if( aUsage[ii].argvIndex<=0 ) return 0;
+  }
+  return 1;
+}
+
 /*
 ** Argument pIdxInfo is already populated with all constraints that may
 ** be used by the virtual table identified by pBuilder->pNew->iTab. This
@@ -164900,13 +166332,20 @@
       *pbIn = 1;
       assert( (mExclude & WO_IN)==0 );
     }
+
+    /* Unless pbRetryLimit is non-NULL, there should be no LIMIT/OFFSET
+    ** terms. And if there are any, they should follow all other terms. */
     assert( pbRetryLimit || !isLimitTerm(pTerm) );
-    if( isLimitTerm(pTerm) && *pbIn ){
+    assert( !isLimitTerm(pTerm) || i>=nConstraint-2 );
+    assert( !isLimitTerm(pTerm) || i==nConstraint-1 || isLimitTerm(pTerm+1) );
+
+    if( isLimitTerm(pTerm) && (*pbIn || !allConstraintsUsed(pUsage, i)) ){
       /* If there is an IN(...) term handled as an == (separate call to
       ** xFilter for each value on the RHS of the IN) and a LIMIT or
-      ** OFFSET term handled as well, the plan is unusable. Set output
-      ** variable *pbRetryLimit to true to tell the caller to retry with
-      ** LIMIT and OFFSET disabled. */
+      ** OFFSET term handled as well, the plan is unusable. Similarly,
+      ** if there is a LIMIT/OFFSET and there are other unused terms,
+      ** the plan cannot be used. In these cases set variable *pbRetryLimit
+      ** to true to tell the caller to retry with LIMIT and OFFSET
+      ** disabled.
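+      ** (Editorial example, not part of the upstream comment, added for
+      ** clarity: in "SELECT * FROM vt WHERE a IN (1,2) AND b>7 LIMIT 5",
+      ** if xBestIndex consumed the LIMIT but left the b>7 constraint for
+      ** the core to re-check, the virtual table would count rows toward
+      ** the LIMIT before the b>7 filter ran, and too few rows could be
+      ** returned; hence the retry without LIMIT/OFFSET.)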
*/ if( pIdxInfo->needToFreeIdxStr ){ sqlite3_free(pIdxInfo->idxStr); pIdxInfo->idxStr = 0; @@ -165763,7 +167202,7 @@ static i8 wherePathSatisfiesOrderBy( if( MASKBIT(i) & obSat ) continue; p = pOrderBy->a[i].pExpr; mTerm = sqlite3WhereExprUsage(&pWInfo->sMaskSet,p); - if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue; + if( mTerm==0 && !sqlite3ExprIsConstant(0,p) ) continue; if( (mTerm&~orderDistinctMask)==0 ){ obSat |= MASKBIT(i); } @@ -166232,10 +167671,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } - if( pWInfo->pSelect->pOrderBy - && pWInfo->nOBSat > pWInfo->pSelect->pOrderBy->nExpr ){ - pWInfo->nOBSat = pWInfo->pSelect->pOrderBy->nExpr; - } + /* vvv--- See check-in [12ad822d9b827777] on 2023-03-16 ---vvv */ + assert( pWInfo->pSelect->pOrderBy==0 + || pWInfo->nOBSat <= pWInfo->pSelect->pOrderBy->nExpr ); }else{ pWInfo->revMask = pFrom->revLoop; if( pWInfo->nOBSat<=0 ){ @@ -166278,7 +167716,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ } } - pWInfo->nRowOut = pFrom->nRow; /* Free temporary memory and return success */ @@ -166286,6 +167723,83 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ return SQLITE_OK; } +/* +** This routine implements a heuristic designed to improve query planning. +** This routine is called in between the first and second call to +** wherePathSolver(). Hence the name "Interstage" "Heuristic". +** +** The first call to wherePathSolver() (hereafter just "solver()") computes +** the best path without regard to the order of the outputs. The second call +** to the solver() builds upon the first call to try to find an alternative +** path that satisfies the ORDER BY clause. +** +** This routine looks at the results of the first solver() run, and for +** every FROM clause term in the resulting query plan that uses an equality +** constraint against an index, disable other WhereLoops for that same +** FROM clause term that would try to do a full-table scan. This prevents +** an index search from being converted into a full-table scan in order to +** satisfy an ORDER BY clause, since even though we might get slightly better +** performance using the full-scan without sorting if the output size +** estimates are very precise, we might also get severe performance +** degradation using the full-scan if the output size estimate is too large. +** It is better to err on the side of caution. +** +** Except, if the first solver() call generated a full-table scan in an outer +** loop then stop this analysis at the first full-scan, since the second +** solver() run might try to swap that full-scan for another in order to +** get the output into the correct order. In other words, we allow a +** rewrite like this: +** +** First Solver() Second Solver() +** |-- SCAN t1 |-- SCAN t2 +** |-- SEARCH t2 `-- SEARCH t1 +** `-- SORT USING B-TREE +** +** The purpose of this routine is to disallow rewrites such as: +** +** First Solver() Second Solver() +** |-- SEARCH t1 |-- SCAN t2 <--- bad! +** |-- SEARCH t2 `-- SEARCH t1 +** `-- SORT USING B-TREE +** +** See test cases in test/whereN.test for the real-world query that +** originally provoked this heuristic. 
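+**
+** (Editorial note, not part of the upstream comment: the mechanism used
+** below is to set pLoop->prereq to ALLBITS, which records the loop as
+** depending on every possible table. No partial join path can ever
+** satisfy that prerequisite, so the second solver() pass can never pick
+** the loop; the WhereLoop itself stays on pWInfo->pLoops and is freed
+** normally with the rest of the list.)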
+*/
+static SQLITE_NOINLINE void whereInterstageHeuristic(WhereInfo *pWInfo){
+  int i;
+#ifdef WHERETRACE_ENABLED
+  int once = 0;
+#endif
+  for(i=0; i<pWInfo->nLevel; i++){
+    WhereLoop *p = pWInfo->a[i].pWLoop;
+    if( p==0 ) break;
+    if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 ) continue;
+    if( (p->wsFlags & (WHERE_COLUMN_EQ|WHERE_COLUMN_NULL|WHERE_COLUMN_IN))!=0 ){
+      u8 iTab = p->iTab;
+      WhereLoop *pLoop;
+      for(pLoop=pWInfo->pLoops; pLoop; pLoop=pLoop->pNextLoop){
+        if( pLoop->iTab!=iTab ) continue;
+        if( (pLoop->wsFlags & (WHERE_CONSTRAINT|WHERE_AUTO_INDEX))!=0 ){
+          /* Auto-index and index-constrained loops allowed to remain */
+          continue;
+        }
+#ifdef WHERETRACE_ENABLED
+        if( sqlite3WhereTrace & 0x80 ){
+          if( once==0 ){
+            sqlite3DebugPrintf("Loops disabled by interstage heuristic:\n");
+            once = 1;
+          }
+          sqlite3WhereLoopPrint(pLoop, &pWInfo->sWC);
+        }
+#endif /* WHERETRACE_ENABLED */
+        pLoop->prereq = ALLBITS;  /* Prevent 2nd solver() from using this one */
+      }
+    }else{
+      break;
+    }
+  }
+}
+
 /*
 ** Most queries use only a single table (they are not joins) and have
 ** simple == constraints against indexed fields. This routine attempts
@@ -166454,6 +167968,10 @@ static void showAllWhereLoops(WhereInfo *pWInfo, WhereClause *pWC){
 **        the right-most table of a subquery that was flattened into the
 **        main query and that subquery was the right-hand operand of an
 **        inner join that held an ON or USING clause.
+**   6) The ORDER BY clause has 63 or fewer terms
+**   7) The omit-noop-join optimization is enabled.
+**
+** Items (1), (6), and (7) are checked by the caller.
 **
 ** For example, given:
 **
@@ -166574,7 +168092,7 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(
       SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab];
       Table *pTab = pItem->pTab;
       if( (pTab->tabFlags & TF_HasStat1)==0 ) break;
-      pTab->tabFlags |= TF_StatsUsed;
+      pTab->tabFlags |= TF_MaybeReanalyze;
       if( i>=1
        && (pLoop->wsFlags & reqFlags)==reqFlags
        /* vvvvvv--- Always the case if WHERE_COLUMN_EQ is defined */
@@ -166595,6 +168113,58 @@
     }
   }
 }
 
+/*
+** Expression Node callback for sqlite3ExprCanReturnSubtype().
+**
+** Only a function call is able to return a subtype. So if the node
+** is not a function call, return WRC_Prune immediately.
+**
+** A function call is able to return a subtype if it has the
+** SQLITE_RESULT_SUBTYPE property.
+**
+** Assume that every function is able to pass-through a subtype from
+** one of its argument (using sqlite3_result_value()). Most functions
+** are not this way, but we don't have a mechanism to distinguish those
+** that are from those that are not, so assume they all work this way.
+** That means that if one of its arguments is another function and that
+** other function is able to return a subtype, then this function is
+** able to return a subtype.
+*/
+static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){
+  int n;
+  FuncDef *pDef;
+  sqlite3 *db;
+  if( pExpr->op!=TK_FUNCTION ){
+    return WRC_Prune;
+  }
+  assert( ExprUseXList(pExpr) );
+  db = pWalker->pParse->db;
+  n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0;
+  pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0);
+  if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){
+    pWalker->eCode = 1;
+    return WRC_Prune;
+  }
+  return WRC_Continue;
+}
+
+/*
+** Return TRUE if expression pExpr is able to return a subtype.
+**
+** A TRUE return does not guarantee that a subtype will be returned.
+** It only indicates that a subtype return is possible.
+** False positives are acceptable as they only disable an optimization.
+** False negatives, on the other hand, can lead to incorrect answers.
+*/
+static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){
+  Walker w;
+  memset(&w, 0, sizeof(w));
+  w.pParse = pParse;
+  w.xExprCallback = exprNodeCanReturnSubtype;
+  sqlite3WalkExpr(&w, pExpr);
+  return w.eCode;
+}
+
 /*
 ** The index pIdx is used by a query and contains one or more expressions.
 ** In other words pIdx is an index on an expression. iIdxCur is the cursor
@@ -166620,33 +168190,19 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
   for(i=0; i<pIdx->nColumn; i++){
     Expr *pExpr;
     int j = pIdx->aiColumn[i];
-    int bMaybeNullRow;
     if( j==XN_EXPR ){
       pExpr = pIdx->aColExpr->a[i].pExpr;
-      testcase( pTabItem->fg.jointype & JT_LEFT );
-      testcase( pTabItem->fg.jointype & JT_RIGHT );
-      testcase( pTabItem->fg.jointype & JT_LTORJ );
-      bMaybeNullRow = (pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0;
     }else if( j>=0 && (pTab->aCol[j].colFlags & COLFLAG_VIRTUAL)!=0 ){
       pExpr = sqlite3ColumnExpr(pTab, &pTab->aCol[j]);
-      bMaybeNullRow = 0;
     }else{
       continue;
     }
-    if( sqlite3ExprIsConstant(pExpr) ) continue;
-    if( pExpr->op==TK_FUNCTION ){
+    if( sqlite3ExprIsConstant(0,pExpr) ) continue;
+    if( pExpr->op==TK_FUNCTION && sqlite3ExprCanReturnSubtype(pParse,pExpr) ){
       /* Functions that might set a subtype should not be replaced by the
       ** value taken from an expression index since the index omits the
       ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */
-      int n;
-      FuncDef *pDef;
-      sqlite3 *db = pParse->db;
-      assert( ExprUseXList(pExpr) );
-      n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0;
-      pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0);
-      if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){
-        continue;
-      }
+      continue;
     }
     p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr));
    if( p==0 ) break;
@@ -166661,7 +168217,7 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
     p->iDataCur = pTabItem->iCursor;
     p->iIdxCur = iIdxCur;
     p->iIdxCol = i;
-    p->bMaybeNullRow = bMaybeNullRow;
+    p->bMaybeNullRow = (pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0;
     if( sqlite3IndexAffinityStr(pParse->db, pIdx) ){
       p->aff = pIdx->zColAff[i];
     }
@@ -166829,6 +168385,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
   if( pOrderBy && pOrderBy->nExpr>=BMS ){
     pOrderBy = 0;
     wctrlFlags &= ~WHERE_WANT_DISTINCT;
+    wctrlFlags |= WHERE_KEEP_ALL_JOINS;  /* Disable omit-noop-join opt */
   }
 
   /* The number of tables in the FROM clause is limited by the number of
@@ -166911,7 +168468,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
     ){
       pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE;
     }
-    ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW"));
+    if( ALWAYS(pWInfo->pSelect)
+     && (pWInfo->pSelect->selFlags & SF_MultiValue)==0
+    ){
+      ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW"));
+    }
   }else{
     /* Assign a bit from the bitmask to every term in the FROM clause.
     **
@@ -167064,6 +168625,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
     wherePathSolver(pWInfo, 0);
     if( db->mallocFailed ) goto whereBeginError;
     if( pWInfo->pOrderBy ){
+      whereInterstageHeuristic(pWInfo);
       wherePathSolver(pWInfo, pWInfo->nRowOut+1);
       if( db->mallocFailed ) goto whereBeginError;
     }
@@ -167124,10 +168686,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
   ** in-line sqlite3WhereCodeOneLoopStart() for performance reasons.
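  ** (Editorial example, not part of the upstream comment: a query that
  ** benefits from the omit-noop-join test guarded below is
  **       SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.x=t2.x;
  ** when no column of t2 is used and t2.x is UNIQUE, the left join can
  ** neither remove nor duplicate t1 rows, so the scan of t2 is dropped.)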
*/ notReady = ~(Bitmask)0; - if( pWInfo->nLevel>=2 - && pResultSet!=0 /* these two combine to guarantee */ - && 0==(wctrlFlags & WHERE_AGG_DISTINCT) /* condition (1) above */ - && OptimizationEnabled(db, SQLITE_OmitNoopJoin) + if( pWInfo->nLevel>=2 /* Must be a join, or this opt8n is pointless */ + && pResultSet!=0 /* Condition (1) */ + && 0==(wctrlFlags & (WHERE_AGG_DISTINCT|WHERE_KEEP_ALL_JOINS)) /* (1),(6) */ + && OptimizationEnabled(db, SQLITE_OmitNoopJoin) /* (7) */ ){ notReady = whereOmitNoopJoin(pWInfo, notReady); nTabList = pWInfo->nLevel; @@ -167447,26 +169009,6 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } #endif -#ifdef SQLITE_DEBUG -/* -** Return true if cursor iCur is opened by instruction k of the -** bytecode. Used inside of assert() only. -*/ -static int cursorIsOpen(Vdbe *v, int iCur, int k){ - while( k>=0 ){ - VdbeOp *pOp = sqlite3VdbeGetOp(v,k--); - if( pOp->p1!=iCur ) continue; - if( pOp->opcode==OP_Close ) return 0; - if( pOp->opcode==OP_OpenRead ) return 1; - if( pOp->opcode==OP_OpenWrite ) return 1; - if( pOp->opcode==OP_OpenDup ) return 1; - if( pOp->opcode==OP_OpenAutoindex ) return 1; - if( pOp->opcode==OP_OpenEphemeral ) return 1; - } - return 0; -} -#endif /* SQLITE_DEBUG */ - /* ** Generate the end of the WHERE loop. See comments on ** sqlite3WhereBegin() for additional information. @@ -167613,7 +169155,15 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v); assert( (ws & WHERE_IDX_ONLY)==0 || (ws & WHERE_INDEXED)!=0 ); if( (ws & WHERE_IDX_ONLY)==0 ){ - assert( pLevel->iTabCur==pTabList->a[pLevel->iFrom].iCursor ); + SrcItem *pSrc = &pTabList->a[pLevel->iFrom]; + assert( pLevel->iTabCur==pSrc->iCursor ); + if( pSrc->fg.viaCoroutine ){ + int m, n; + n = pSrc->regResult; + assert( pSrc->pTab!=0 ); + m = pSrc->pTab->nCol; + sqlite3VdbeAddOp3(v, OP_Null, 0, n, n+m-1); + } sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur); } if( (ws & WHERE_INDEXED) @@ -167663,6 +169213,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ */ if( pTabItem->fg.viaCoroutine ){ testcase( pParse->db->mallocFailed ); + assert( pTabItem->regResult>=0 ); translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur, pTabItem->regResult, 0); continue; @@ -167757,16 +169308,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ ** reference. Verify that this is harmless - that the ** table being referenced really is open. */ -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - || cursorIsOpen(v,pOp->p1,k) - || pOp->opcode==OP_Offset - ); -#else - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - || cursorIsOpen(v,pOp->p1,k) - ); -#endif + if( pLoop->wsFlags & WHERE_IDX_ONLY ){ + sqlite3ErrorMsg(pParse, "internal query planner error"); + pParse->rc = SQLITE_INTERNAL; + } } }else if( pOp->opcode==OP_Rowid ){ pOp->p1 = pLevel->iIdxCur; @@ -168967,7 +170512,7 @@ SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p){ ** variable values in the expression tree. 
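 **
 ** (Editorial example, not part of the upstream comment: in
 **    SUM(x) OVER (ORDER BY y ROWS BETWEEN z PRECEDING AND CURRENT ROW)
 ** a frame offset "z" that is a column reference is not a constant, so
 ** it is replaced by a NULL here; the error is then raised later, when
 ** the offset is checked for being a non-negative integer.)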
*/ static Expr *sqlite3WindowOffsetExpr(Parse *pParse, Expr *pExpr){ - if( 0==sqlite3ExprIsConstant(pExpr) ){ + if( 0==sqlite3ExprIsConstant(0,pExpr) ){ if( IN_RENAME_OBJECT ) sqlite3RenameExprUnmap(pParse, pExpr); sqlite3ExprDelete(pParse->db, pExpr); pExpr = sqlite3ExprAlloc(pParse->db, TK_NULL, 0, 0); @@ -171037,9 +172582,9 @@ static void updateDeleteLimitError( break; } } - if( (p->selFlags & SF_MultiValue)==0 && - (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 && - cnt>mxSelect + if( (p->selFlags & (SF_MultiValue|SF_Values))==0 + && (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 + && cnt>mxSelect ){ sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); } @@ -171059,6 +172604,14 @@ static void updateDeleteLimitError( return pSelect; } + /* Memory allocator for parser stack resizing. This is a thin wrapper around + ** sqlite3_realloc() that includes a call to sqlite3FaultSim() to facilitate + ** testing. + */ + static void *parserStackRealloc(void *pOld, sqlite3_uint64 newSize){ + return sqlite3FaultSim(700) ? 0 : sqlite3_realloc(pOld, newSize); + } + /* Construct a new Expr object from a single token */ static Expr *tokenExpr(Parse *pParse, int op, Token t){ @@ -171308,8 +172861,8 @@ static void updateDeleteLimitError( #define TK_TRUEFALSE 170 #define TK_ISNOT 171 #define TK_FUNCTION 172 -#define TK_UMINUS 173 -#define TK_UPLUS 174 +#define TK_UPLUS 173 +#define TK_UMINUS 174 #define TK_TRUTH 175 #define TK_REGISTER 176 #define TK_VECTOR 177 @@ -171318,8 +172871,9 @@ static void updateDeleteLimitError( #define TK_ASTERISK 180 #define TK_SPAN 181 #define TK_ERROR 182 -#define TK_SPACE 183 -#define TK_ILLEGAL 184 +#define TK_QNUMBER 183 +#define TK_SPACE 184 +#define TK_ILLEGAL 185 #endif /**************** End token definitions ***************************************/ @@ -171360,6 +172914,9 @@ static void updateDeleteLimitError( ** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser ** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser ** sqlite3ParserCTX_* As sqlite3ParserARG_ except for %extra_context +** YYREALLOC Name of the realloc() function to use +** YYFREE Name of the free() function to use +** YYDYNSTACK True if stack space should be extended on heap ** YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** YYNSTATE the combined number of states. 
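[Editorial note between hunks: the parserStackRealloc() hook above and the YYREALLOC/YYDYNSTACK plumbing below let the Lemon-generated parser grow its stack on the heap instead of failing at a fixed depth, with sqlite3FaultSim(700) serving as a test hook for simulated allocation failure. The following standalone sketch shows the same grow-by-doubling pattern behind a pluggable realloc; all names here are invented for illustration, and this is not code from the diff itself.]

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical growable stack, analogous to the parser's yystack */
    typedef struct Stack {
      int *a;          /* stack entries */
      int n;           /* entries in use */
      int nAlloc;      /* allocated slots */
    } Stack;

    /* Pluggable realloc hook; returning NULL here simulates an OOM
    ** fault, the way sqlite3FaultSim(700) does in parserStackRealloc. */
    static void *stackRealloc(void *pOld, size_t nByte){
      return realloc(pOld, nByte);
    }

    /* Grow by doubling. Returns 0 on success, 1 on allocation failure,
    ** leaving the old stack intact on failure (as the parser must). */
    static int stackGrow(Stack *p){
      int nNew = p->nAlloc ? p->nAlloc*2 : 8;
      int *aNew = stackRealloc(p->a, nNew*sizeof(int));
      if( aNew==0 ) return 1;
      p->a = aNew;
      p->nAlloc = nNew;
      return 0;
    }

    static int stackPush(Stack *p, int v){
      if( p->n>=p->nAlloc && stackGrow(p) ) return 1;
      p->a[p->n++] = v;
      return 0;
    }

    int main(void){
      Stack s = {0, 0, 0};
      for(int i=0; i<100; i++){
        if( stackPush(&s, i) ){ fprintf(stderr, "out of memory\n"); break; }
      }
      printf("depth=%d cap=%d\n", s.n, s.nAlloc);
      free(s.a);
      return 0;
    }

[The design point mirrored from the diff: keeping the realloc behind a named hook (YYREALLOC) makes allocation failure injectable in tests without touching the generated parser logic.]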
@@ -171373,37 +172930,39 @@ static void updateDeleteLimitError( ** YY_NO_ACTION The yy_action[] code for no-op ** YY_MIN_REDUCE Minimum value for reduce actions ** YY_MAX_REDUCE Maximum value for reduce actions +** YY_MIN_DSTRCTR Minimum symbol value that has a destructor +** YY_MAX_DSTRCTR Maximum symbol value that has a destructor */ #ifndef INTERFACE # define INTERFACE 1 #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 319 +#define YYNOCODE 322 #define YYACTIONTYPE unsigned short int #define YYWILDCARD 101 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - TriggerStep* yy33; - Window* yy41; - Select* yy47; - SrcList* yy131; - struct TrigEvent yy180; - struct {int value; int mask;} yy231; - IdList* yy254; - u32 yy285; - ExprList* yy322; - Cte* yy385; - int yy394; - Upsert* yy444; - u8 yy516; - With* yy521; - const char* yy522; - Expr* yy528; - OnOrUsing yy561; - struct FrameBound yy595; + ExprList* yy14; + With* yy59; + Cte* yy67; + Upsert* yy122; + IdList* yy132; + int yy144; + const char* yy168; + SrcList* yy203; + Window* yy211; + OnOrUsing yy269; + struct TrigEvent yy286; + struct {int value; int mask;} yy383; + u32 yy391; + TriggerStep* yy427; + Expr* yy454; + u8 yy462; + struct FrameBound yy509; + Select* yy555; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -171413,24 +172972,29 @@ typedef union { #define sqlite3ParserARG_PARAM #define sqlite3ParserARG_FETCH #define sqlite3ParserARG_STORE +#define YYREALLOC parserStackRealloc +#define YYFREE sqlite3_free +#define YYDYNSTACK 1 #define sqlite3ParserCTX_SDECL Parse *pParse; #define sqlite3ParserCTX_PDECL ,Parse *pParse #define sqlite3ParserCTX_PARAM ,pParse #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 579 -#define YYNRULE 405 -#define YYNRULE_WITH_ACTION 340 -#define YYNTOKEN 185 -#define YY_MAX_SHIFT 578 -#define YY_MIN_SHIFTREDUCE 838 -#define YY_MAX_SHIFTREDUCE 1242 -#define YY_ERROR_ACTION 1243 -#define YY_ACCEPT_ACTION 1244 -#define YY_NO_ACTION 1245 -#define YY_MIN_REDUCE 1246 -#define YY_MAX_REDUCE 1650 +#define YYNSTATE 583 +#define YYNRULE 409 +#define YYNRULE_WITH_ACTION 344 +#define YYNTOKEN 186 +#define YY_MAX_SHIFT 582 +#define YY_MIN_SHIFTREDUCE 845 +#define YY_MAX_SHIFTREDUCE 1253 +#define YY_ERROR_ACTION 1254 +#define YY_ACCEPT_ACTION 1255 +#define YY_NO_ACTION 1256 +#define YY_MIN_REDUCE 1257 +#define YY_MAX_REDUCE 1665 +#define YY_MIN_DSTRCTR 205 +#define YY_MAX_DSTRCTR 319 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -171446,6 +173010,22 @@ typedef union { # define yytestcase(X) #endif +/* Macro to determine if stack space has the ability to grow using +** heap memory. +*/ +#if YYSTACKDEPTH<=0 || YYDYNSTACK +# define YYGROWABLESTACK 1 +#else +# define YYGROWABLESTACK 0 +#endif + +/* Guarantee a minimum number of initial stack slots. +*/ +#if YYSTACKDEPTH<=0 +# undef YYSTACKDEPTH +# define YYSTACKDEPTH 2 /* Need a minimum stack size */ +#endif + /* Next are the tables used to determine what action to take based on the ** current state and lookahead token. These tables are used to implement @@ -171497,619 +173077,630 @@ typedef union { ** yy_default[] Default action for each state. 
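 **
 ** (Editorial sketch, not part of the upstream comment: the lookup these
 ** tables drive is roughly the following, with an invented wrapper name;
 ** the real generated code adds fallback and wildcard-token handling:
 **
 **      int yy_find_action(int stateno, int iLookAhead){
 **        int i = yy_shift_ofst[stateno] + iLookAhead;
 **        if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
 **          return yy_default[stateno];
 **        }
 **        return yy_action[i];
 **      }
 ** )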
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2100) +#define YY_ACTTAB_COUNT (2142) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 572, 210, 572, 119, 116, 231, 572, 119, 116, 231, - /* 10 */ 572, 1317, 379, 1296, 410, 566, 566, 566, 572, 411, - /* 20 */ 380, 1317, 1279, 42, 42, 42, 42, 210, 1529, 72, - /* 30 */ 72, 974, 421, 42, 42, 495, 305, 281, 305, 975, - /* 40 */ 399, 72, 72, 126, 127, 81, 1217, 1217, 1054, 1057, - /* 50 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 480, 411, - /* 60 */ 1244, 1, 1, 578, 2, 1248, 554, 119, 116, 231, - /* 70 */ 319, 484, 147, 484, 528, 119, 116, 231, 533, 1330, - /* 80 */ 419, 527, 143, 126, 127, 81, 1217, 1217, 1054, 1057, - /* 90 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 119, 116, - /* 100 */ 231, 329, 123, 123, 123, 123, 122, 122, 121, 121, - /* 110 */ 121, 120, 117, 448, 286, 286, 286, 286, 446, 446, - /* 120 */ 446, 1568, 378, 1570, 1193, 377, 1164, 569, 1164, 569, - /* 130 */ 411, 1568, 541, 261, 228, 448, 102, 146, 453, 318, - /* 140 */ 563, 242, 123, 123, 123, 123, 122, 122, 121, 121, - /* 150 */ 121, 120, 117, 448, 126, 127, 81, 1217, 1217, 1054, - /* 160 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 143, - /* 170 */ 296, 1193, 341, 452, 121, 121, 121, 120, 117, 448, - /* 180 */ 128, 1193, 1194, 1193, 149, 445, 444, 572, 120, 117, - /* 190 */ 448, 125, 125, 125, 125, 118, 123, 123, 123, 123, - /* 200 */ 122, 122, 121, 121, 121, 120, 117, 448, 458, 114, - /* 210 */ 13, 13, 550, 123, 123, 123, 123, 122, 122, 121, - /* 220 */ 121, 121, 120, 117, 448, 424, 318, 563, 1193, 1194, - /* 230 */ 1193, 150, 1225, 411, 1225, 125, 125, 125, 125, 123, - /* 240 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, - /* 250 */ 448, 469, 344, 1041, 1041, 1055, 1058, 126, 127, 81, - /* 260 */ 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, - /* 270 */ 125, 125, 1282, 526, 224, 1193, 572, 411, 226, 519, - /* 280 */ 177, 83, 84, 123, 123, 123, 123, 122, 122, 121, - /* 290 */ 121, 121, 120, 117, 448, 1010, 16, 16, 1193, 134, - /* 300 */ 134, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, - /* 310 */ 124, 124, 125, 125, 125, 125, 123, 123, 123, 123, - /* 320 */ 122, 122, 121, 121, 121, 120, 117, 448, 1045, 550, - /* 330 */ 1193, 375, 1193, 1194, 1193, 254, 1438, 401, 508, 505, - /* 340 */ 504, 112, 564, 570, 4, 929, 929, 435, 503, 342, - /* 350 */ 464, 330, 362, 396, 1238, 1193, 1194, 1193, 567, 572, - /* 360 */ 123, 123, 123, 123, 122, 122, 121, 121, 121, 120, - /* 370 */ 117, 448, 286, 286, 371, 1581, 1607, 445, 444, 155, - /* 380 */ 411, 449, 72, 72, 1289, 569, 1222, 1193, 1194, 1193, - /* 390 */ 86, 1224, 273, 561, 547, 520, 520, 572, 99, 1223, - /* 400 */ 6, 1281, 476, 143, 126, 127, 81, 1217, 1217, 1054, - /* 410 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 554, - /* 420 */ 13, 13, 1031, 511, 1225, 1193, 1225, 553, 110, 110, - /* 430 */ 224, 572, 1239, 177, 572, 429, 111, 199, 449, 573, - /* 440 */ 449, 432, 1555, 1019, 327, 555, 1193, 272, 289, 370, - /* 450 */ 514, 365, 513, 259, 72, 72, 547, 72, 72, 361, - /* 460 */ 318, 563, 1613, 123, 123, 123, 123, 122, 122, 121, - /* 470 */ 121, 121, 120, 117, 448, 1019, 1019, 1021, 1022, 28, - /* 480 */ 286, 286, 1193, 1194, 1193, 1159, 572, 1612, 411, 904, - /* 490 */ 192, 554, 358, 569, 554, 940, 537, 521, 1159, 437, - /* 500 */ 415, 1159, 556, 1193, 1194, 1193, 572, 548, 548, 52, - /* 510 */ 52, 216, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, - /* 520 */ 1044, 124, 124, 125, 125, 125, 125, 1193, 478, 136, - /* 530 */ 136, 
411, 286, 286, 1493, 509, 122, 122, 121, 121, - /* 540 */ 121, 120, 117, 448, 1010, 569, 522, 219, 545, 545, - /* 550 */ 318, 563, 143, 6, 536, 126, 127, 81, 1217, 1217, - /* 560 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, - /* 570 */ 1557, 123, 123, 123, 123, 122, 122, 121, 121, 121, - /* 580 */ 120, 117, 448, 489, 1193, 1194, 1193, 486, 283, 1270, - /* 590 */ 960, 254, 1193, 375, 508, 505, 504, 1193, 342, 574, - /* 600 */ 1193, 574, 411, 294, 503, 960, 879, 193, 484, 318, - /* 610 */ 563, 386, 292, 382, 123, 123, 123, 123, 122, 122, - /* 620 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, - /* 630 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, - /* 640 */ 125, 411, 396, 1139, 1193, 872, 101, 286, 286, 1193, - /* 650 */ 1194, 1193, 375, 1096, 1193, 1194, 1193, 1193, 1194, 1193, - /* 660 */ 569, 459, 33, 375, 235, 126, 127, 81, 1217, 1217, - /* 670 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, - /* 680 */ 1437, 962, 572, 230, 961, 123, 123, 123, 123, 122, - /* 690 */ 122, 121, 121, 121, 120, 117, 448, 1159, 230, 1193, - /* 700 */ 158, 1193, 1194, 1193, 1556, 13, 13, 303, 960, 1233, - /* 710 */ 1159, 154, 411, 1159, 375, 1584, 1177, 5, 371, 1581, - /* 720 */ 431, 1239, 3, 960, 123, 123, 123, 123, 122, 122, - /* 730 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, - /* 740 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, - /* 750 */ 125, 411, 210, 571, 1193, 1032, 1193, 1194, 1193, 1193, - /* 760 */ 390, 855, 156, 1555, 376, 404, 1101, 1101, 492, 572, - /* 770 */ 469, 344, 1322, 1322, 1555, 126, 127, 81, 1217, 1217, - /* 780 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, - /* 790 */ 130, 572, 13, 13, 532, 123, 123, 123, 123, 122, - /* 800 */ 122, 121, 121, 121, 120, 117, 448, 304, 572, 457, - /* 810 */ 229, 1193, 1194, 1193, 13, 13, 1193, 1194, 1193, 1300, - /* 820 */ 467, 1270, 411, 1320, 1320, 1555, 1015, 457, 456, 436, - /* 830 */ 301, 72, 72, 1268, 123, 123, 123, 123, 122, 122, - /* 840 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, - /* 850 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, - /* 860 */ 125, 411, 384, 1076, 1159, 286, 286, 421, 314, 280, - /* 870 */ 280, 287, 287, 461, 408, 407, 1539, 1159, 569, 572, - /* 880 */ 1159, 1196, 569, 409, 569, 126, 127, 81, 1217, 1217, - /* 890 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, - /* 900 */ 457, 1485, 13, 13, 1541, 123, 123, 123, 123, 122, - /* 910 */ 122, 121, 121, 121, 120, 117, 448, 202, 572, 462, - /* 920 */ 1587, 578, 2, 1248, 843, 844, 845, 1563, 319, 409, - /* 930 */ 147, 6, 411, 257, 256, 255, 208, 1330, 9, 1196, - /* 940 */ 264, 72, 72, 1436, 123, 123, 123, 123, 122, 122, - /* 950 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, - /* 960 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, - /* 970 */ 125, 572, 286, 286, 572, 1213, 411, 577, 315, 1248, - /* 980 */ 421, 371, 1581, 356, 319, 569, 147, 495, 529, 1644, - /* 990 */ 397, 935, 495, 1330, 71, 71, 934, 72, 72, 242, - /* 1000 */ 1328, 105, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, - /* 1010 */ 124, 125, 125, 125, 125, 123, 123, 123, 123, 122, - /* 1020 */ 122, 121, 121, 121, 120, 117, 448, 1117, 286, 286, - /* 1030 */ 1422, 452, 1528, 1213, 443, 286, 286, 1492, 1355, 313, - /* 1040 */ 478, 569, 1118, 454, 351, 495, 354, 1266, 569, 209, - /* 1050 */ 572, 418, 179, 572, 1031, 242, 385, 1119, 523, 123, - /* 1060 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, - /* 1070 */ 448, 1020, 108, 72, 72, 1019, 13, 13, 915, 572, - /* 1080 */ 1498, 572, 286, 286, 98, 530, 1537, 
452, 916, 1334, - /* 1090 */ 1329, 203, 411, 286, 286, 569, 152, 211, 1498, 1500, - /* 1100 */ 426, 569, 56, 56, 57, 57, 569, 1019, 1019, 1021, - /* 1110 */ 447, 572, 411, 531, 12, 297, 126, 127, 81, 1217, - /* 1120 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, - /* 1130 */ 125, 572, 411, 867, 15, 15, 126, 127, 81, 1217, - /* 1140 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, - /* 1150 */ 125, 373, 529, 264, 44, 44, 126, 115, 81, 1217, - /* 1160 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, - /* 1170 */ 125, 1498, 478, 1271, 417, 123, 123, 123, 123, 122, - /* 1180 */ 122, 121, 121, 121, 120, 117, 448, 205, 1213, 495, - /* 1190 */ 430, 867, 468, 322, 495, 123, 123, 123, 123, 122, - /* 1200 */ 122, 121, 121, 121, 120, 117, 448, 572, 557, 1140, - /* 1210 */ 1642, 1422, 1642, 543, 572, 123, 123, 123, 123, 122, - /* 1220 */ 122, 121, 121, 121, 120, 117, 448, 572, 1422, 572, - /* 1230 */ 13, 13, 542, 323, 1325, 411, 334, 58, 58, 349, - /* 1240 */ 1422, 1170, 326, 286, 286, 549, 1213, 300, 895, 530, - /* 1250 */ 45, 45, 59, 59, 1140, 1643, 569, 1643, 565, 417, - /* 1260 */ 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, - /* 1270 */ 125, 125, 125, 125, 1367, 373, 500, 290, 1193, 512, - /* 1280 */ 1366, 427, 394, 394, 393, 275, 391, 896, 1138, 852, - /* 1290 */ 478, 258, 1422, 1170, 463, 1159, 12, 331, 428, 333, - /* 1300 */ 1117, 460, 236, 258, 325, 460, 544, 1544, 1159, 1098, - /* 1310 */ 491, 1159, 324, 1098, 440, 1118, 335, 516, 123, 123, - /* 1320 */ 123, 123, 122, 122, 121, 121, 121, 120, 117, 448, - /* 1330 */ 1119, 318, 563, 1138, 572, 1193, 1194, 1193, 112, 564, - /* 1340 */ 201, 4, 238, 433, 935, 490, 285, 228, 1517, 934, - /* 1350 */ 170, 560, 572, 142, 1516, 567, 572, 60, 60, 572, - /* 1360 */ 416, 572, 441, 572, 535, 302, 875, 8, 487, 572, - /* 1370 */ 237, 572, 416, 572, 485, 61, 61, 572, 449, 62, - /* 1380 */ 62, 332, 63, 63, 46, 46, 47, 47, 361, 572, - /* 1390 */ 561, 572, 48, 48, 50, 50, 51, 51, 572, 295, - /* 1400 */ 64, 64, 482, 295, 539, 412, 471, 1031, 572, 538, - /* 1410 */ 318, 563, 65, 65, 66, 66, 409, 475, 572, 1031, - /* 1420 */ 572, 14, 14, 875, 1020, 110, 110, 409, 1019, 572, - /* 1430 */ 474, 67, 67, 111, 455, 449, 573, 449, 98, 317, - /* 1440 */ 1019, 132, 132, 133, 133, 572, 1561, 572, 974, 409, - /* 1450 */ 6, 1562, 68, 68, 1560, 6, 975, 572, 6, 1559, - /* 1460 */ 1019, 1019, 1021, 6, 346, 218, 101, 531, 53, 53, - /* 1470 */ 69, 69, 1019, 1019, 1021, 1022, 28, 1586, 1181, 451, - /* 1480 */ 70, 70, 290, 87, 215, 31, 1363, 394, 394, 393, - /* 1490 */ 275, 391, 350, 109, 852, 107, 572, 112, 564, 483, - /* 1500 */ 4, 1212, 572, 239, 153, 572, 39, 236, 1299, 325, - /* 1510 */ 112, 564, 1298, 4, 567, 572, 32, 324, 572, 54, - /* 1520 */ 54, 572, 1135, 353, 398, 165, 165, 567, 166, 166, - /* 1530 */ 572, 291, 355, 572, 17, 357, 572, 449, 77, 77, - /* 1540 */ 1313, 55, 55, 1297, 73, 73, 572, 238, 470, 561, - /* 1550 */ 449, 472, 364, 135, 135, 170, 74, 74, 142, 163, - /* 1560 */ 163, 374, 561, 539, 572, 321, 572, 886, 540, 137, - /* 1570 */ 137, 339, 1353, 422, 298, 237, 539, 572, 1031, 572, - /* 1580 */ 340, 538, 101, 369, 110, 110, 162, 131, 131, 164, - /* 1590 */ 164, 1031, 111, 368, 449, 573, 449, 110, 110, 1019, - /* 1600 */ 157, 157, 141, 141, 572, 111, 572, 449, 573, 449, - /* 1610 */ 412, 288, 1019, 572, 882, 318, 563, 572, 219, 572, - /* 1620 */ 241, 1012, 477, 263, 263, 894, 893, 140, 140, 138, - /* 1630 */ 138, 1019, 1019, 1021, 1022, 28, 139, 139, 525, 455, - /* 1640 */ 76, 76, 78, 78, 1019, 1019, 1021, 1022, 28, 1181, - 
/* 1650 */ 451, 572, 1083, 290, 112, 564, 1575, 4, 394, 394, - /* 1660 */ 393, 275, 391, 572, 1023, 852, 572, 479, 345, 263, - /* 1670 */ 101, 567, 882, 1376, 75, 75, 1421, 501, 236, 260, - /* 1680 */ 325, 112, 564, 359, 4, 101, 43, 43, 324, 49, - /* 1690 */ 49, 901, 902, 161, 449, 101, 977, 978, 567, 1079, - /* 1700 */ 1349, 260, 965, 932, 263, 114, 561, 1095, 517, 1095, - /* 1710 */ 1083, 1094, 865, 1094, 151, 933, 1144, 114, 238, 1361, - /* 1720 */ 558, 449, 1023, 559, 1426, 1278, 170, 1269, 1257, 142, - /* 1730 */ 1601, 1256, 1258, 561, 1594, 1031, 496, 278, 213, 1346, - /* 1740 */ 310, 110, 110, 939, 311, 312, 237, 11, 234, 111, - /* 1750 */ 221, 449, 573, 449, 293, 395, 1019, 1408, 337, 1403, - /* 1760 */ 1396, 338, 1031, 299, 343, 1413, 1412, 481, 110, 110, - /* 1770 */ 506, 402, 225, 1296, 206, 367, 111, 1358, 449, 573, - /* 1780 */ 449, 412, 1359, 1019, 1489, 1488, 318, 563, 1019, 1019, - /* 1790 */ 1021, 1022, 28, 562, 207, 220, 80, 564, 389, 4, - /* 1800 */ 1597, 1357, 552, 1356, 1233, 181, 267, 232, 1536, 1534, - /* 1810 */ 455, 1230, 420, 567, 82, 1019, 1019, 1021, 1022, 28, - /* 1820 */ 86, 217, 85, 1494, 190, 175, 183, 465, 185, 466, - /* 1830 */ 36, 1409, 186, 187, 188, 499, 449, 244, 37, 99, - /* 1840 */ 400, 1415, 1414, 488, 1417, 194, 473, 403, 561, 1483, - /* 1850 */ 248, 92, 1505, 494, 198, 279, 112, 564, 250, 4, - /* 1860 */ 348, 497, 405, 352, 1259, 251, 252, 515, 1316, 434, - /* 1870 */ 1315, 1314, 94, 567, 1307, 886, 1306, 1031, 226, 406, - /* 1880 */ 1611, 1610, 438, 110, 110, 1580, 1286, 524, 439, 308, - /* 1890 */ 266, 111, 1285, 449, 573, 449, 449, 309, 1019, 366, - /* 1900 */ 1284, 1609, 265, 1566, 1565, 442, 372, 1381, 561, 129, - /* 1910 */ 550, 1380, 10, 1470, 383, 106, 316, 551, 100, 35, - /* 1920 */ 534, 575, 212, 1339, 381, 387, 1187, 1338, 274, 276, - /* 1930 */ 1019, 1019, 1021, 1022, 28, 277, 413, 1031, 576, 1254, - /* 1940 */ 388, 1521, 1249, 110, 110, 167, 1522, 168, 148, 1520, - /* 1950 */ 1519, 111, 306, 449, 573, 449, 222, 223, 1019, 839, - /* 1960 */ 169, 79, 450, 214, 414, 233, 320, 145, 1093, 1091, - /* 1970 */ 328, 182, 171, 1212, 918, 184, 240, 336, 243, 1107, - /* 1980 */ 189, 172, 173, 423, 425, 88, 180, 191, 89, 90, - /* 1990 */ 1019, 1019, 1021, 1022, 28, 91, 174, 1110, 245, 1106, - /* 2000 */ 246, 159, 18, 247, 347, 1099, 263, 195, 1227, 493, - /* 2010 */ 249, 196, 38, 854, 498, 368, 253, 360, 897, 197, - /* 2020 */ 502, 93, 19, 20, 507, 884, 363, 510, 95, 307, - /* 2030 */ 160, 96, 518, 97, 1175, 1060, 1146, 40, 21, 227, - /* 2040 */ 176, 1145, 282, 284, 969, 200, 963, 114, 262, 1165, - /* 2050 */ 22, 23, 24, 1161, 1169, 25, 1163, 1150, 34, 26, - /* 2060 */ 1168, 546, 27, 204, 101, 103, 104, 1074, 7, 1061, - /* 2070 */ 1059, 1063, 1116, 1064, 1115, 268, 269, 29, 41, 270, - /* 2080 */ 1024, 866, 113, 30, 568, 392, 1183, 144, 178, 1182, - /* 2090 */ 271, 928, 1245, 1245, 1245, 1245, 1245, 1245, 1245, 1602, + /* 0 */ 576, 128, 125, 232, 1622, 549, 576, 1290, 1281, 576, + /* 10 */ 328, 576, 1300, 212, 576, 128, 125, 232, 578, 412, + /* 20 */ 578, 391, 1542, 51, 51, 523, 405, 1293, 529, 51, + /* 30 */ 51, 983, 51, 51, 81, 81, 1107, 61, 61, 984, + /* 40 */ 1107, 1292, 380, 135, 136, 90, 1228, 1228, 1063, 1066, + /* 50 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 1577, 412, + /* 60 */ 287, 287, 7, 287, 287, 422, 1050, 1050, 1064, 1067, + /* 70 */ 289, 556, 492, 573, 524, 561, 573, 497, 561, 482, + /* 80 */ 530, 262, 229, 135, 136, 90, 1228, 1228, 1063, 1066, + /* 90 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 128, 125, + /* 100 */ 232, 1506, 
132, 132, 132, 132, 131, 131, 130, 130, + /* 110 */ 130, 129, 126, 450, 1204, 1255, 1, 1, 582, 2, + /* 120 */ 1259, 1571, 420, 1582, 379, 320, 1174, 153, 1174, 1584, + /* 130 */ 412, 378, 1582, 543, 1341, 330, 111, 570, 570, 570, + /* 140 */ 293, 1054, 132, 132, 132, 132, 131, 131, 130, 130, + /* 150 */ 130, 129, 126, 450, 135, 136, 90, 1228, 1228, 1063, + /* 160 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 287, + /* 170 */ 287, 1204, 1205, 1204, 255, 287, 287, 510, 507, 506, + /* 180 */ 137, 455, 573, 212, 561, 447, 446, 505, 573, 1616, + /* 190 */ 561, 134, 134, 134, 134, 127, 400, 243, 132, 132, + /* 200 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, + /* 210 */ 282, 471, 345, 132, 132, 132, 132, 131, 131, 130, + /* 220 */ 130, 130, 129, 126, 450, 574, 155, 936, 936, 454, + /* 230 */ 227, 521, 1236, 412, 1236, 134, 134, 134, 134, 132, + /* 240 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, + /* 250 */ 450, 130, 130, 130, 129, 126, 450, 135, 136, 90, + /* 260 */ 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, + /* 270 */ 134, 134, 128, 125, 232, 450, 576, 412, 397, 1249, + /* 280 */ 180, 92, 93, 132, 132, 132, 132, 131, 131, 130, + /* 290 */ 130, 130, 129, 126, 450, 381, 387, 1204, 383, 81, + /* 300 */ 81, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, + /* 310 */ 133, 133, 134, 134, 134, 134, 132, 132, 132, 132, + /* 320 */ 131, 131, 130, 130, 130, 129, 126, 450, 131, 131, + /* 330 */ 130, 130, 130, 129, 126, 450, 556, 1204, 302, 319, + /* 340 */ 567, 121, 568, 480, 4, 555, 1149, 1657, 1628, 1657, + /* 350 */ 45, 128, 125, 232, 1204, 1205, 1204, 1250, 571, 1169, + /* 360 */ 132, 132, 132, 132, 131, 131, 130, 130, 130, 129, + /* 370 */ 126, 450, 1169, 287, 287, 1169, 1019, 576, 422, 1019, + /* 380 */ 412, 451, 1602, 582, 2, 1259, 573, 44, 561, 95, + /* 390 */ 320, 110, 153, 565, 1204, 1205, 1204, 522, 522, 1341, + /* 400 */ 81, 81, 7, 44, 135, 136, 90, 1228, 1228, 1063, + /* 410 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 295, + /* 420 */ 1149, 1658, 1040, 1658, 1204, 1147, 319, 567, 119, 119, + /* 430 */ 343, 466, 331, 343, 287, 287, 120, 556, 451, 577, + /* 440 */ 451, 1169, 1169, 1028, 319, 567, 438, 573, 210, 561, + /* 450 */ 1339, 1451, 546, 531, 1169, 1169, 1598, 1169, 1169, 416, + /* 460 */ 319, 567, 243, 132, 132, 132, 132, 131, 131, 130, + /* 470 */ 130, 130, 129, 126, 450, 1028, 1028, 1030, 1031, 35, + /* 480 */ 44, 1204, 1205, 1204, 472, 287, 287, 1328, 412, 1307, + /* 490 */ 372, 1595, 359, 225, 454, 1204, 195, 1328, 573, 1147, + /* 500 */ 561, 1333, 1333, 274, 576, 1188, 576, 340, 46, 196, + /* 510 */ 537, 217, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, + /* 520 */ 1053, 133, 133, 134, 134, 134, 134, 19, 19, 19, + /* 530 */ 19, 412, 581, 1204, 1259, 511, 1204, 319, 567, 320, + /* 540 */ 944, 153, 425, 491, 430, 943, 1204, 488, 1341, 1450, + /* 550 */ 532, 1277, 1204, 1205, 1204, 135, 136, 90, 1228, 1228, + /* 560 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, + /* 570 */ 575, 132, 132, 132, 132, 131, 131, 130, 130, 130, + /* 580 */ 129, 126, 450, 287, 287, 528, 287, 287, 372, 1595, + /* 590 */ 1204, 1205, 1204, 1204, 1205, 1204, 573, 486, 561, 573, + /* 600 */ 889, 561, 412, 1204, 1205, 1204, 886, 40, 22, 22, + /* 610 */ 220, 243, 525, 1449, 132, 132, 132, 132, 131, 131, + /* 620 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, + /* 630 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, + /* 640 */ 134, 412, 180, 454, 1204, 879, 255, 287, 287, 510, + /* 650 */ 507, 506, 372, 1595, 1568, 1331, 1331, 576, 889, 505, + /* 
660 */ 573, 44, 561, 559, 1207, 135, 136, 90, 1228, 1228, + /* 670 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, + /* 680 */ 81, 81, 422, 576, 377, 132, 132, 132, 132, 131, + /* 690 */ 131, 130, 130, 130, 129, 126, 450, 297, 287, 287, + /* 700 */ 460, 1204, 1205, 1204, 1204, 534, 19, 19, 448, 448, + /* 710 */ 448, 573, 412, 561, 230, 436, 1187, 535, 319, 567, + /* 720 */ 363, 432, 1207, 1435, 132, 132, 132, 132, 131, 131, + /* 730 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, + /* 740 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, + /* 750 */ 134, 412, 211, 949, 1169, 1041, 1110, 1110, 494, 547, + /* 760 */ 547, 1204, 1205, 1204, 7, 539, 1570, 1169, 376, 576, + /* 770 */ 1169, 5, 1204, 486, 3, 135, 136, 90, 1228, 1228, + /* 780 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, + /* 790 */ 576, 513, 19, 19, 427, 132, 132, 132, 132, 131, + /* 800 */ 131, 130, 130, 130, 129, 126, 450, 305, 1204, 433, + /* 810 */ 225, 1204, 385, 19, 19, 273, 290, 371, 516, 366, + /* 820 */ 515, 260, 412, 538, 1568, 549, 1024, 362, 437, 1204, + /* 830 */ 1205, 1204, 902, 1552, 132, 132, 132, 132, 131, 131, + /* 840 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, + /* 850 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, + /* 860 */ 134, 412, 1435, 514, 1281, 1204, 1205, 1204, 1204, 1205, + /* 870 */ 1204, 903, 48, 342, 1568, 1568, 1279, 1627, 1568, 911, + /* 880 */ 576, 129, 126, 450, 110, 135, 136, 90, 1228, 1228, + /* 890 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, + /* 900 */ 265, 576, 459, 19, 19, 132, 132, 132, 132, 131, + /* 910 */ 131, 130, 130, 130, 129, 126, 450, 1345, 204, 576, + /* 920 */ 459, 458, 50, 47, 19, 19, 49, 434, 1105, 573, + /* 930 */ 497, 561, 412, 428, 108, 1224, 1569, 1554, 376, 205, + /* 940 */ 550, 550, 81, 81, 132, 132, 132, 132, 131, 131, + /* 950 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, + /* 960 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, + /* 970 */ 134, 480, 576, 1204, 576, 1541, 412, 1435, 969, 315, + /* 980 */ 1659, 398, 284, 497, 969, 893, 1569, 1569, 376, 376, + /* 990 */ 1569, 461, 376, 1224, 459, 80, 80, 81, 81, 497, + /* 1000 */ 374, 114, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, + /* 1010 */ 133, 134, 134, 134, 134, 132, 132, 132, 132, 131, + /* 1020 */ 131, 130, 130, 130, 129, 126, 450, 1204, 1505, 576, + /* 1030 */ 1204, 1205, 1204, 1366, 316, 486, 281, 281, 497, 431, + /* 1040 */ 557, 288, 288, 402, 1340, 471, 345, 298, 429, 573, + /* 1050 */ 576, 561, 81, 81, 573, 374, 561, 971, 386, 132, + /* 1060 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, + /* 1070 */ 450, 231, 117, 81, 81, 287, 287, 231, 287, 287, + /* 1080 */ 576, 1511, 576, 1336, 1204, 1205, 1204, 139, 573, 556, + /* 1090 */ 561, 573, 412, 561, 441, 456, 969, 213, 558, 1511, + /* 1100 */ 1513, 1550, 969, 143, 143, 145, 145, 1368, 314, 478, + /* 1110 */ 444, 970, 412, 850, 851, 852, 135, 136, 90, 1228, + /* 1120 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, + /* 1130 */ 134, 357, 412, 397, 1148, 304, 135, 136, 90, 1228, + /* 1140 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, + /* 1150 */ 134, 1575, 323, 6, 862, 7, 135, 124, 90, 1228, + /* 1160 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, + /* 1170 */ 134, 409, 408, 1511, 212, 132, 132, 132, 132, 131, + /* 1180 */ 131, 130, 130, 130, 129, 126, 450, 411, 118, 1204, + /* 1190 */ 116, 10, 352, 265, 355, 132, 132, 132, 132, 131, + /* 1200 */ 131, 130, 130, 130, 129, 126, 450, 576, 324, 306, + /* 1210 */ 576, 306, 1250, 469, 158, 132, 
132, 132, 132, 131, + /* 1220 */ 131, 130, 130, 130, 129, 126, 450, 207, 1224, 1126, + /* 1230 */ 65, 65, 470, 66, 66, 412, 447, 446, 882, 531, + /* 1240 */ 335, 258, 257, 256, 1127, 1233, 1204, 1205, 1204, 327, + /* 1250 */ 1235, 874, 159, 576, 16, 480, 1085, 1040, 1234, 1128, + /* 1260 */ 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, + /* 1270 */ 134, 134, 134, 134, 1029, 576, 81, 81, 1028, 1040, + /* 1280 */ 922, 576, 463, 1236, 576, 1236, 1224, 502, 107, 1435, + /* 1290 */ 923, 6, 576, 410, 1498, 882, 1029, 480, 21, 21, + /* 1300 */ 1028, 332, 1380, 334, 53, 53, 497, 81, 81, 874, + /* 1310 */ 1028, 1028, 1030, 445, 259, 19, 19, 533, 132, 132, + /* 1320 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, + /* 1330 */ 551, 301, 1028, 1028, 1030, 107, 532, 545, 121, 568, + /* 1340 */ 1188, 4, 1126, 1576, 449, 576, 462, 7, 1282, 418, + /* 1350 */ 462, 350, 1435, 576, 518, 571, 544, 1127, 121, 568, + /* 1360 */ 442, 4, 1188, 464, 533, 1180, 1223, 9, 67, 67, + /* 1370 */ 487, 576, 1128, 303, 410, 571, 54, 54, 451, 576, + /* 1380 */ 123, 944, 576, 417, 576, 333, 943, 1379, 576, 236, + /* 1390 */ 565, 576, 1574, 564, 68, 68, 7, 576, 451, 362, + /* 1400 */ 419, 182, 69, 69, 541, 70, 70, 71, 71, 540, + /* 1410 */ 565, 72, 72, 484, 55, 55, 473, 1180, 296, 1040, + /* 1420 */ 56, 56, 296, 493, 541, 119, 119, 410, 1573, 542, + /* 1430 */ 569, 418, 7, 120, 1244, 451, 577, 451, 465, 1040, + /* 1440 */ 1028, 576, 1557, 552, 476, 119, 119, 527, 259, 121, + /* 1450 */ 568, 240, 4, 120, 576, 451, 577, 451, 576, 477, + /* 1460 */ 1028, 576, 156, 576, 57, 57, 571, 576, 286, 229, + /* 1470 */ 410, 336, 1028, 1028, 1030, 1031, 35, 59, 59, 219, + /* 1480 */ 983, 60, 60, 220, 73, 73, 74, 74, 984, 451, + /* 1490 */ 75, 75, 1028, 1028, 1030, 1031, 35, 96, 216, 291, + /* 1500 */ 552, 565, 1188, 318, 395, 395, 394, 276, 392, 576, + /* 1510 */ 485, 859, 474, 1311, 410, 541, 576, 417, 1530, 1144, + /* 1520 */ 540, 399, 1188, 292, 237, 1153, 326, 38, 23, 576, + /* 1530 */ 1040, 576, 20, 20, 325, 299, 119, 119, 164, 76, + /* 1540 */ 76, 1529, 121, 568, 120, 4, 451, 577, 451, 203, + /* 1550 */ 576, 1028, 141, 141, 142, 142, 576, 322, 39, 571, + /* 1560 */ 341, 1021, 110, 264, 239, 901, 900, 423, 242, 908, + /* 1570 */ 909, 370, 173, 77, 77, 43, 479, 1310, 264, 62, + /* 1580 */ 62, 369, 451, 1028, 1028, 1030, 1031, 35, 1601, 1192, + /* 1590 */ 453, 1092, 238, 291, 565, 163, 1309, 110, 395, 395, + /* 1600 */ 394, 276, 392, 986, 987, 859, 481, 346, 264, 110, + /* 1610 */ 1032, 489, 576, 1188, 503, 1088, 261, 261, 237, 576, + /* 1620 */ 326, 121, 568, 1040, 4, 347, 1376, 413, 325, 119, + /* 1630 */ 119, 948, 319, 567, 351, 78, 78, 120, 571, 451, + /* 1640 */ 577, 451, 79, 79, 1028, 354, 356, 576, 360, 1092, + /* 1650 */ 110, 576, 974, 942, 264, 123, 457, 358, 239, 576, + /* 1660 */ 519, 451, 939, 1104, 123, 1104, 173, 576, 1032, 43, + /* 1670 */ 63, 63, 1324, 565, 168, 168, 1028, 1028, 1030, 1031, + /* 1680 */ 35, 576, 169, 169, 1308, 872, 238, 157, 1589, 576, + /* 1690 */ 86, 86, 365, 89, 568, 375, 4, 1103, 941, 1103, + /* 1700 */ 123, 576, 1040, 1389, 64, 64, 1188, 1434, 119, 119, + /* 1710 */ 571, 576, 82, 82, 563, 576, 120, 165, 451, 577, + /* 1720 */ 451, 413, 1362, 1028, 144, 144, 319, 567, 576, 1374, + /* 1730 */ 562, 498, 279, 451, 83, 83, 1439, 576, 166, 166, + /* 1740 */ 576, 1289, 554, 576, 1280, 565, 576, 12, 576, 1268, + /* 1750 */ 457, 146, 146, 1267, 576, 1028, 1028, 1030, 1031, 35, + /* 1760 */ 140, 140, 1269, 167, 167, 1609, 160, 160, 1359, 150, + /* 1770 */ 150, 149, 149, 311, 1040, 576, 312, 
147, 147, 313, + /* 1780 */ 119, 119, 222, 235, 576, 1188, 396, 576, 120, 576, + /* 1790 */ 451, 577, 451, 1192, 453, 1028, 508, 291, 148, 148, + /* 1800 */ 1421, 1612, 395, 395, 394, 276, 392, 85, 85, 859, + /* 1810 */ 87, 87, 84, 84, 553, 576, 294, 576, 1426, 338, + /* 1820 */ 339, 1425, 237, 300, 326, 1416, 1409, 1028, 1028, 1030, + /* 1830 */ 1031, 35, 325, 344, 403, 483, 226, 1307, 52, 52, + /* 1840 */ 58, 58, 368, 1371, 1502, 566, 1501, 121, 568, 221, + /* 1850 */ 4, 208, 268, 209, 390, 1244, 1549, 1188, 1372, 1370, + /* 1860 */ 1369, 1547, 239, 184, 571, 233, 421, 1241, 95, 218, + /* 1870 */ 173, 1507, 193, 43, 91, 94, 178, 186, 467, 188, + /* 1880 */ 468, 1422, 13, 189, 190, 191, 501, 451, 245, 108, + /* 1890 */ 238, 401, 1428, 1427, 1430, 475, 404, 1496, 197, 565, + /* 1900 */ 14, 490, 249, 101, 1518, 496, 349, 280, 251, 201, + /* 1910 */ 353, 499, 252, 406, 1270, 253, 517, 1327, 1326, 435, + /* 1920 */ 1325, 1318, 103, 893, 1296, 413, 227, 407, 1040, 1626, + /* 1930 */ 319, 567, 1625, 1297, 119, 119, 439, 367, 1317, 1295, + /* 1940 */ 1624, 526, 120, 440, 451, 577, 451, 1594, 309, 1028, + /* 1950 */ 310, 373, 266, 267, 457, 1580, 1579, 443, 138, 1394, + /* 1960 */ 552, 1393, 11, 1483, 384, 115, 317, 1350, 109, 536, + /* 1970 */ 42, 579, 382, 214, 1349, 388, 1198, 389, 275, 277, + /* 1980 */ 278, 1028, 1028, 1030, 1031, 35, 580, 1265, 414, 1260, + /* 1990 */ 170, 415, 183, 1534, 1535, 1533, 171, 154, 307, 1532, + /* 2000 */ 846, 223, 224, 88, 452, 215, 172, 321, 234, 1102, + /* 2010 */ 152, 1188, 1100, 329, 185, 174, 1223, 925, 187, 241, + /* 2020 */ 337, 244, 1116, 192, 175, 176, 424, 426, 97, 194, + /* 2030 */ 98, 99, 100, 177, 1119, 1115, 246, 247, 161, 24, + /* 2040 */ 248, 348, 1238, 264, 1108, 250, 495, 199, 198, 15, + /* 2050 */ 861, 500, 369, 254, 504, 509, 512, 200, 102, 25, + /* 2060 */ 179, 361, 26, 364, 104, 891, 308, 162, 105, 904, + /* 2070 */ 520, 106, 1185, 1069, 1155, 17, 228, 27, 1154, 283, + /* 2080 */ 285, 263, 978, 202, 972, 123, 28, 1175, 29, 30, + /* 2090 */ 1179, 1171, 31, 1173, 1160, 41, 32, 206, 548, 33, + /* 2100 */ 110, 1178, 1083, 8, 112, 1070, 113, 1068, 1072, 34, + /* 2110 */ 1073, 560, 1125, 269, 1124, 270, 36, 18, 1194, 1033, + /* 2120 */ 873, 151, 122, 37, 393, 271, 272, 572, 181, 1193, + /* 2130 */ 1256, 1256, 1256, 935, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2140 */ 1256, 1617, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276, - /* 10 */ 193, 223, 219, 225, 206, 210, 211, 212, 193, 19, - /* 20 */ 219, 233, 216, 216, 217, 216, 217, 193, 295, 216, - /* 30 */ 217, 31, 193, 216, 217, 193, 228, 213, 230, 39, - /* 40 */ 206, 216, 217, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 193, 19, - /* 60 */ 185, 186, 187, 188, 189, 190, 253, 274, 275, 276, - /* 70 */ 195, 193, 197, 193, 261, 274, 275, 276, 253, 204, - /* 80 */ 238, 204, 81, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 274, 275, - /* 100 */ 276, 262, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 112, 113, 239, 240, 239, 240, 210, 211, - /* 120 */ 212, 314, 315, 314, 59, 316, 86, 252, 88, 252, - /* 130 */ 19, 314, 315, 256, 257, 113, 25, 72, 296, 138, - /* 140 */ 139, 266, 102, 103, 104, 105, 106, 107, 108, 109, + /* 0 */ 194, 276, 277, 278, 216, 194, 194, 217, 194, 194, + /* 10 */ 194, 194, 224, 194, 194, 276, 277, 278, 204, 19, + /* 20 */ 206, 202, 297, 217, 218, 205, 207, 217, 205, 217, + /* 30 */ 218, 31, 217, 218, 217, 218, 29, 217, 218, 39, + /* 40 */ 33, 217, 220, 
43, 44, 45, 46, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 312, 19, + /* 60 */ 240, 241, 316, 240, 241, 194, 46, 47, 48, 49, + /* 70 */ 22, 254, 65, 253, 254, 255, 253, 194, 255, 194, + /* 80 */ 263, 258, 259, 43, 44, 45, 46, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 276, 277, + /* 100 */ 278, 285, 102, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 113, 59, 186, 187, 188, 189, 190, + /* 120 */ 191, 310, 239, 317, 318, 196, 86, 198, 88, 317, + /* 130 */ 19, 319, 317, 318, 205, 264, 25, 211, 212, 213, + /* 140 */ 205, 121, 102, 103, 104, 105, 106, 107, 108, 109, /* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48, - /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 81, - /* 170 */ 292, 59, 292, 298, 108, 109, 110, 111, 112, 113, - /* 180 */ 69, 116, 117, 118, 72, 106, 107, 193, 111, 112, - /* 190 */ 113, 54, 55, 56, 57, 58, 102, 103, 104, 105, - /* 200 */ 106, 107, 108, 109, 110, 111, 112, 113, 120, 25, - /* 210 */ 216, 217, 145, 102, 103, 104, 105, 106, 107, 108, - /* 220 */ 109, 110, 111, 112, 113, 231, 138, 139, 116, 117, - /* 230 */ 118, 164, 153, 19, 155, 54, 55, 56, 57, 102, + /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 240, + /* 170 */ 241, 116, 117, 118, 119, 240, 241, 122, 123, 124, + /* 180 */ 69, 298, 253, 194, 255, 106, 107, 132, 253, 141, + /* 190 */ 255, 54, 55, 56, 57, 58, 207, 268, 102, 103, + /* 200 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 210 */ 214, 128, 129, 102, 103, 104, 105, 106, 107, 108, + /* 220 */ 109, 110, 111, 112, 113, 134, 25, 136, 137, 300, + /* 230 */ 165, 166, 153, 19, 155, 54, 55, 56, 57, 102, /* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 250 */ 113, 128, 129, 46, 47, 48, 49, 43, 44, 45, + /* 250 */ 113, 108, 109, 110, 111, 112, 113, 43, 44, 45, /* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 270 */ 56, 57, 216, 193, 25, 59, 193, 19, 165, 166, - /* 280 */ 193, 67, 24, 102, 103, 104, 105, 106, 107, 108, - /* 290 */ 109, 110, 111, 112, 113, 73, 216, 217, 59, 216, - /* 300 */ 217, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 270 */ 56, 57, 276, 277, 278, 113, 194, 19, 22, 23, + /* 280 */ 194, 67, 24, 102, 103, 104, 105, 106, 107, 108, + /* 290 */ 109, 110, 111, 112, 113, 220, 250, 59, 252, 217, + /* 300 */ 218, 43, 44, 45, 46, 47, 48, 49, 50, 51, /* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105, - /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 121, 145, - /* 330 */ 59, 193, 116, 117, 118, 119, 273, 204, 122, 123, - /* 340 */ 124, 19, 20, 134, 22, 136, 137, 19, 132, 127, - /* 350 */ 128, 129, 24, 22, 23, 116, 117, 118, 36, 193, + /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 106, 107, + /* 330 */ 108, 109, 110, 111, 112, 113, 254, 59, 205, 138, + /* 340 */ 139, 19, 20, 194, 22, 263, 22, 23, 231, 25, + /* 350 */ 72, 276, 277, 278, 116, 117, 118, 101, 36, 76, /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 112, 113, 239, 240, 311, 312, 215, 106, 107, 241, - /* 380 */ 19, 59, 216, 217, 223, 252, 115, 116, 117, 118, - /* 390 */ 151, 120, 26, 71, 193, 308, 309, 193, 149, 128, - /* 400 */ 313, 216, 269, 81, 43, 44, 45, 46, 47, 48, - /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 253, - /* 420 */ 216, 217, 100, 95, 153, 59, 155, 261, 106, 107, - /* 430 */ 25, 193, 101, 193, 193, 231, 114, 25, 116, 117, - /* 440 */ 118, 113, 304, 121, 193, 204, 59, 119, 120, 121, - /* 450 */ 122, 123, 124, 125, 216, 217, 193, 216, 217, 131, - /* 460 */ 138, 139, 230, 102, 103, 104, 105, 106, 107, 108, + /* 370 */ 112, 113, 89, 240, 241, 92, 73, 194, 194, 73, + /* 380 */ 19, 
59, 188, 189, 190, 191, 253, 81, 255, 151, + /* 390 */ 196, 25, 198, 71, 116, 117, 118, 311, 312, 205, + /* 400 */ 217, 218, 316, 81, 43, 44, 45, 46, 47, 48, + /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 270, + /* 420 */ 22, 23, 100, 25, 59, 101, 138, 139, 106, 107, + /* 430 */ 127, 128, 129, 127, 240, 241, 114, 254, 116, 117, + /* 440 */ 118, 76, 76, 121, 138, 139, 263, 253, 264, 255, + /* 450 */ 205, 275, 87, 19, 89, 89, 194, 92, 92, 199, + /* 460 */ 138, 139, 268, 102, 103, 104, 105, 106, 107, 108, /* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157, - /* 480 */ 239, 240, 116, 117, 118, 76, 193, 23, 19, 25, - /* 490 */ 22, 253, 23, 252, 253, 108, 87, 204, 89, 261, - /* 500 */ 198, 92, 261, 116, 117, 118, 193, 306, 307, 216, - /* 510 */ 217, 150, 43, 44, 45, 46, 47, 48, 49, 50, - /* 520 */ 51, 52, 53, 54, 55, 56, 57, 59, 193, 216, - /* 530 */ 217, 19, 239, 240, 283, 23, 106, 107, 108, 109, - /* 540 */ 110, 111, 112, 113, 73, 252, 253, 142, 308, 309, - /* 550 */ 138, 139, 81, 313, 145, 43, 44, 45, 46, 47, + /* 480 */ 81, 116, 117, 118, 129, 240, 241, 224, 19, 226, + /* 490 */ 314, 315, 23, 25, 300, 59, 22, 234, 253, 101, + /* 500 */ 255, 236, 237, 26, 194, 183, 194, 152, 72, 22, + /* 510 */ 145, 150, 43, 44, 45, 46, 47, 48, 49, 50, + /* 520 */ 51, 52, 53, 54, 55, 56, 57, 217, 218, 217, + /* 530 */ 218, 19, 189, 59, 191, 23, 59, 138, 139, 196, + /* 540 */ 135, 198, 232, 283, 232, 140, 59, 287, 205, 275, + /* 550 */ 116, 205, 116, 117, 118, 43, 44, 45, 46, 47, /* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 570 */ 307, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 580 */ 111, 112, 113, 281, 116, 117, 118, 285, 23, 193, - /* 590 */ 25, 119, 59, 193, 122, 123, 124, 59, 127, 203, - /* 600 */ 59, 205, 19, 268, 132, 25, 23, 22, 193, 138, - /* 610 */ 139, 249, 204, 251, 102, 103, 104, 105, 106, 107, + /* 570 */ 194, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 580 */ 111, 112, 113, 240, 241, 194, 240, 241, 314, 315, + /* 590 */ 116, 117, 118, 116, 117, 118, 253, 194, 255, 253, + /* 600 */ 59, 255, 19, 116, 117, 118, 23, 22, 217, 218, + /* 610 */ 142, 268, 205, 275, 102, 103, 104, 105, 106, 107, /* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 640 */ 57, 19, 22, 23, 59, 23, 25, 239, 240, 116, - /* 650 */ 117, 118, 193, 11, 116, 117, 118, 116, 117, 118, - /* 660 */ 252, 269, 22, 193, 15, 43, 44, 45, 46, 47, + /* 640 */ 57, 19, 194, 300, 59, 23, 119, 240, 241, 122, + /* 650 */ 123, 124, 314, 315, 194, 236, 237, 194, 117, 132, + /* 660 */ 253, 81, 255, 205, 59, 43, 44, 45, 46, 47, /* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 680 */ 273, 143, 193, 118, 143, 102, 103, 104, 105, 106, - /* 690 */ 107, 108, 109, 110, 111, 112, 113, 76, 118, 59, - /* 700 */ 241, 116, 117, 118, 304, 216, 217, 292, 143, 60, - /* 710 */ 89, 241, 19, 92, 193, 193, 23, 22, 311, 312, - /* 720 */ 231, 101, 22, 143, 102, 103, 104, 105, 106, 107, + /* 680 */ 217, 218, 194, 194, 194, 102, 103, 104, 105, 106, + /* 690 */ 107, 108, 109, 110, 111, 112, 113, 294, 240, 241, + /* 700 */ 120, 116, 117, 118, 59, 194, 217, 218, 211, 212, + /* 710 */ 213, 253, 19, 255, 194, 19, 23, 254, 138, 139, + /* 720 */ 24, 232, 117, 194, 102, 103, 104, 105, 106, 107, /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59, - /* 760 */ 201, 21, 241, 304, 193, 206, 127, 128, 129, 193, - /* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47, + /* 750 */ 57, 
19, 264, 108, 76, 23, 127, 128, 129, 311, + /* 760 */ 312, 116, 117, 118, 316, 87, 306, 89, 308, 194, + /* 770 */ 92, 22, 59, 194, 22, 43, 44, 45, 46, 47, /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106, - /* 800 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 193, - /* 810 */ 193, 116, 117, 118, 216, 217, 116, 117, 118, 226, - /* 820 */ 80, 193, 19, 235, 236, 304, 23, 211, 212, 231, - /* 830 */ 204, 216, 217, 205, 102, 103, 104, 105, 106, 107, + /* 790 */ 194, 95, 217, 218, 265, 102, 103, 104, 105, 106, + /* 800 */ 107, 108, 109, 110, 111, 112, 113, 232, 59, 113, + /* 810 */ 25, 59, 194, 217, 218, 119, 120, 121, 122, 123, + /* 820 */ 124, 125, 19, 145, 194, 194, 23, 131, 232, 116, + /* 830 */ 117, 118, 35, 194, 102, 103, 104, 105, 106, 107, /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239, - /* 870 */ 240, 239, 240, 244, 106, 107, 193, 89, 252, 193, - /* 880 */ 92, 59, 252, 254, 252, 43, 44, 45, 46, 47, + /* 860 */ 57, 19, 194, 66, 194, 116, 117, 118, 116, 117, + /* 870 */ 118, 74, 242, 294, 194, 194, 206, 23, 194, 25, + /* 880 */ 194, 111, 112, 113, 25, 43, 44, 45, 46, 47, /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 244, - /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 254, - /* 930 */ 197, 313, 19, 127, 128, 129, 262, 204, 22, 117, - /* 940 */ 24, 216, 217, 273, 102, 103, 104, 105, 106, 107, + /* 900 */ 24, 194, 194, 217, 218, 102, 103, 104, 105, 106, + /* 910 */ 107, 108, 109, 110, 111, 112, 113, 241, 232, 194, + /* 920 */ 212, 213, 242, 242, 217, 218, 242, 130, 11, 253, + /* 930 */ 194, 255, 19, 265, 149, 59, 306, 194, 308, 232, + /* 940 */ 309, 310, 217, 218, 102, 103, 104, 105, 106, 107, /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190, - /* 980 */ 193, 311, 312, 16, 195, 252, 197, 193, 19, 301, - /* 990 */ 302, 135, 193, 204, 216, 217, 140, 216, 217, 266, - /* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52, + /* 970 */ 57, 194, 194, 59, 194, 239, 19, 194, 25, 254, + /* 980 */ 303, 304, 23, 194, 25, 126, 306, 306, 308, 308, + /* 990 */ 306, 271, 308, 117, 286, 217, 218, 217, 218, 194, + /* 1000 */ 194, 159, 45, 46, 47, 48, 49, 50, 51, 52, /* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106, - /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240, - /* 1030 */ 193, 298, 238, 117, 253, 239, 240, 238, 259, 260, - /* 1040 */ 193, 252, 27, 193, 77, 193, 79, 204, 252, 262, - /* 1050 */ 193, 299, 300, 193, 100, 266, 278, 42, 204, 102, + /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 59, 239, 194, + /* 1030 */ 116, 117, 118, 260, 254, 194, 240, 241, 194, 233, + /* 1040 */ 205, 240, 241, 205, 239, 128, 129, 270, 265, 253, + /* 1050 */ 194, 255, 217, 218, 253, 194, 255, 143, 280, 102, /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193, - /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 240, - /* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212, - /* 1100 */ 263, 252, 216, 217, 216, 217, 252, 153, 154, 155, - /* 1110 */ 253, 193, 19, 144, 213, 268, 43, 44, 45, 46, + /* 1070 */ 113, 118, 159, 217, 218, 240, 241, 118, 240, 241, + /* 1080 */ 194, 194, 194, 239, 116, 117, 118, 22, 253, 254, + /* 
1090 */ 255, 253, 19, 255, 233, 194, 143, 24, 263, 212, + /* 1100 */ 213, 194, 143, 217, 218, 217, 218, 261, 262, 271, + /* 1110 */ 254, 143, 19, 7, 8, 9, 43, 44, 45, 46, /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 193, 19, 59, 216, 217, 43, 44, 45, 46, + /* 1130 */ 57, 16, 19, 22, 23, 294, 43, 44, 45, 46, /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 193, 19, 24, 216, 217, 43, 44, 45, 46, + /* 1150 */ 57, 312, 194, 214, 21, 316, 43, 44, 45, 46, /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 284, 193, 208, 209, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 59, 193, - /* 1190 */ 232, 117, 291, 193, 193, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 193, 204, 22, - /* 1210 */ 23, 193, 25, 66, 193, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 193, 193, - /* 1230 */ 216, 217, 85, 193, 238, 19, 16, 216, 217, 238, - /* 1240 */ 193, 94, 193, 239, 240, 231, 117, 268, 35, 116, - /* 1250 */ 216, 217, 216, 217, 22, 23, 252, 25, 208, 209, + /* 1170 */ 57, 106, 107, 286, 194, 102, 103, 104, 105, 106, + /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 207, 158, 59, + /* 1190 */ 160, 22, 77, 24, 79, 102, 103, 104, 105, 106, + /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 194, 194, 229, + /* 1210 */ 194, 231, 101, 80, 22, 102, 103, 104, 105, 106, + /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 288, 59, 12, + /* 1230 */ 217, 218, 293, 217, 218, 19, 106, 107, 59, 19, + /* 1240 */ 16, 127, 128, 129, 27, 115, 116, 117, 118, 194, + /* 1250 */ 120, 59, 22, 194, 24, 194, 123, 100, 128, 42, /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 193, 193, 19, 5, 59, 66, - /* 1280 */ 193, 263, 10, 11, 12, 13, 14, 74, 101, 17, - /* 1290 */ 193, 46, 193, 146, 193, 76, 213, 77, 263, 79, - /* 1300 */ 12, 260, 30, 46, 32, 264, 87, 193, 89, 29, - /* 1310 */ 263, 92, 40, 33, 232, 27, 193, 108, 102, 103, + /* 1270 */ 54, 55, 56, 57, 117, 194, 217, 218, 121, 100, + /* 1280 */ 63, 194, 245, 153, 194, 155, 117, 19, 115, 194, + /* 1290 */ 73, 214, 194, 256, 161, 116, 117, 194, 217, 218, + /* 1300 */ 121, 77, 194, 79, 217, 218, 194, 217, 218, 117, + /* 1310 */ 153, 154, 155, 254, 46, 217, 218, 144, 102, 103, /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 42, 138, 139, 101, 193, 116, 117, 118, 19, 20, - /* 1340 */ 255, 22, 70, 130, 135, 65, 256, 257, 193, 140, - /* 1350 */ 78, 63, 193, 81, 193, 36, 193, 216, 217, 193, - /* 1360 */ 115, 193, 263, 193, 145, 268, 59, 48, 193, 193, - /* 1370 */ 98, 193, 115, 193, 291, 216, 217, 193, 59, 216, - /* 1380 */ 217, 161, 216, 217, 216, 217, 216, 217, 131, 193, - /* 1390 */ 71, 193, 216, 217, 216, 217, 216, 217, 193, 260, - /* 1400 */ 216, 217, 19, 264, 85, 133, 244, 100, 193, 90, - /* 1410 */ 138, 139, 216, 217, 216, 217, 254, 244, 193, 100, - /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 254, 121, 193, - /* 1430 */ 115, 216, 217, 114, 162, 116, 117, 118, 115, 244, - /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 193, 31, 254, - /* 1450 */ 313, 309, 216, 217, 309, 313, 39, 193, 313, 309, - /* 1460 */ 153, 154, 155, 313, 193, 150, 25, 144, 216, 217, - /* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2, - /* 1480 */ 216, 217, 5, 149, 150, 22, 193, 10, 11, 12, - /* 1490 */ 13, 14, 193, 158, 17, 160, 193, 19, 20, 116, - /* 1500 */ 22, 25, 193, 24, 22, 193, 24, 30, 226, 32, - /* 1510 */ 19, 20, 226, 22, 36, 193, 53, 40, 193, 216, - /* 1520 */ 217, 193, 23, 193, 25, 216, 217, 36, 216, 217, 
- /* 1530 */ 193, 99, 193, 193, 22, 193, 193, 59, 216, 217, - /* 1540 */ 193, 216, 217, 193, 216, 217, 193, 70, 129, 71, - /* 1550 */ 59, 129, 193, 216, 217, 78, 216, 217, 81, 216, - /* 1560 */ 217, 193, 71, 85, 193, 133, 193, 126, 90, 216, - /* 1570 */ 217, 152, 258, 61, 152, 98, 85, 193, 100, 193, - /* 1580 */ 23, 90, 25, 121, 106, 107, 23, 216, 217, 216, - /* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121, - /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118, - /* 1610 */ 133, 22, 121, 193, 59, 138, 139, 193, 142, 193, - /* 1620 */ 141, 23, 23, 25, 25, 120, 121, 216, 217, 216, - /* 1630 */ 217, 153, 154, 155, 156, 157, 216, 217, 19, 162, - /* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1, - /* 1650 */ 2, 193, 59, 5, 19, 20, 318, 22, 10, 11, - /* 1660 */ 12, 13, 14, 193, 59, 17, 193, 23, 23, 25, - /* 1670 */ 25, 36, 117, 193, 216, 217, 193, 23, 30, 25, - /* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216, - /* 1690 */ 217, 7, 8, 23, 59, 25, 83, 84, 36, 23, - /* 1700 */ 193, 25, 23, 23, 25, 25, 71, 153, 145, 155, - /* 1710 */ 117, 153, 23, 155, 25, 23, 97, 25, 70, 193, - /* 1720 */ 193, 59, 117, 236, 193, 193, 78, 193, 193, 81, - /* 1730 */ 141, 193, 193, 71, 193, 100, 288, 287, 242, 255, - /* 1740 */ 255, 106, 107, 108, 255, 255, 98, 243, 297, 114, - /* 1750 */ 214, 116, 117, 118, 245, 191, 121, 271, 293, 267, - /* 1760 */ 267, 246, 100, 246, 245, 271, 271, 293, 106, 107, - /* 1770 */ 220, 271, 229, 225, 249, 219, 114, 259, 116, 117, - /* 1780 */ 118, 133, 259, 121, 219, 219, 138, 139, 153, 154, - /* 1790 */ 155, 156, 157, 280, 249, 243, 19, 20, 245, 22, - /* 1800 */ 196, 259, 140, 259, 60, 297, 141, 297, 200, 200, - /* 1810 */ 162, 38, 200, 36, 294, 153, 154, 155, 156, 157, - /* 1820 */ 151, 150, 294, 283, 22, 43, 234, 18, 237, 200, - /* 1830 */ 270, 272, 237, 237, 237, 18, 59, 199, 270, 149, - /* 1840 */ 246, 272, 272, 200, 234, 234, 246, 246, 71, 246, - /* 1850 */ 199, 158, 290, 62, 22, 200, 19, 20, 199, 22, - /* 1860 */ 289, 221, 221, 200, 200, 199, 199, 115, 218, 64, - /* 1870 */ 218, 218, 22, 36, 227, 126, 227, 100, 165, 221, - /* 1880 */ 224, 224, 24, 106, 107, 312, 218, 305, 113, 282, - /* 1890 */ 91, 114, 220, 116, 117, 118, 59, 282, 121, 218, - /* 1900 */ 218, 218, 200, 317, 317, 82, 221, 265, 71, 148, - /* 1910 */ 145, 265, 22, 277, 200, 158, 279, 140, 147, 25, - /* 1920 */ 146, 202, 248, 250, 249, 247, 13, 250, 194, 194, - /* 1930 */ 153, 154, 155, 156, 157, 6, 303, 100, 192, 192, - /* 1940 */ 246, 213, 192, 106, 107, 207, 213, 207, 222, 213, - /* 1950 */ 213, 114, 222, 116, 117, 118, 214, 214, 121, 4, - /* 1960 */ 207, 213, 3, 22, 303, 15, 163, 16, 23, 23, - /* 1970 */ 139, 151, 130, 25, 20, 142, 24, 16, 144, 1, - /* 1980 */ 142, 130, 130, 61, 37, 53, 300, 151, 53, 53, - /* 1990 */ 153, 154, 155, 156, 157, 53, 130, 116, 34, 1, - /* 2000 */ 141, 5, 22, 115, 161, 68, 25, 68, 75, 41, - /* 2010 */ 141, 115, 24, 20, 19, 131, 125, 23, 28, 22, - /* 2020 */ 67, 22, 22, 22, 67, 59, 24, 96, 22, 67, - /* 2030 */ 23, 149, 22, 25, 23, 23, 23, 22, 34, 141, - /* 2040 */ 37, 97, 23, 23, 116, 22, 143, 25, 34, 75, - /* 2050 */ 34, 34, 34, 88, 75, 34, 86, 23, 22, 34, - /* 2060 */ 93, 24, 34, 25, 25, 142, 142, 23, 44, 23, - /* 2070 */ 23, 23, 23, 11, 23, 25, 22, 22, 22, 141, - /* 2080 */ 23, 23, 22, 22, 25, 15, 1, 23, 25, 1, - /* 2090 */ 141, 135, 319, 319, 319, 319, 319, 319, 319, 141, - /* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 
319, 319, - /* 2130 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2140 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2150 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2160 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2170 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2180 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2190 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2200 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2210 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2220 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2230 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2240 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2280 */ 319, 319, 319, 319, 319, + /* 1330 */ 232, 270, 153, 154, 155, 115, 116, 66, 19, 20, + /* 1340 */ 183, 22, 12, 312, 254, 194, 262, 316, 209, 210, + /* 1350 */ 266, 239, 194, 194, 108, 36, 85, 27, 19, 20, + /* 1360 */ 265, 22, 183, 245, 144, 94, 25, 48, 217, 218, + /* 1370 */ 293, 194, 42, 270, 256, 36, 217, 218, 59, 194, + /* 1380 */ 25, 135, 194, 115, 194, 161, 140, 194, 194, 15, + /* 1390 */ 71, 194, 312, 63, 217, 218, 316, 194, 59, 131, + /* 1400 */ 301, 302, 217, 218, 85, 217, 218, 217, 218, 90, + /* 1410 */ 71, 217, 218, 19, 217, 218, 245, 146, 262, 100, + /* 1420 */ 217, 218, 266, 265, 85, 106, 107, 256, 312, 90, + /* 1430 */ 209, 210, 316, 114, 60, 116, 117, 118, 194, 100, + /* 1440 */ 121, 194, 194, 145, 115, 106, 107, 19, 46, 19, + /* 1450 */ 20, 24, 22, 114, 194, 116, 117, 118, 194, 245, + /* 1460 */ 121, 194, 164, 194, 217, 218, 36, 194, 258, 259, + /* 1470 */ 256, 194, 153, 154, 155, 156, 157, 217, 218, 150, + /* 1480 */ 31, 217, 218, 142, 217, 218, 217, 218, 39, 59, + /* 1490 */ 217, 218, 153, 154, 155, 156, 157, 149, 150, 5, + /* 1500 */ 145, 71, 183, 245, 10, 11, 12, 13, 14, 194, + /* 1510 */ 116, 17, 129, 227, 256, 85, 194, 115, 194, 23, + /* 1520 */ 90, 25, 183, 99, 30, 97, 32, 22, 22, 194, + /* 1530 */ 100, 194, 217, 218, 40, 152, 106, 107, 23, 217, + /* 1540 */ 218, 194, 19, 20, 114, 22, 116, 117, 118, 257, + /* 1550 */ 194, 121, 217, 218, 217, 218, 194, 133, 53, 36, + /* 1560 */ 23, 23, 25, 25, 70, 120, 121, 61, 141, 7, + /* 1570 */ 8, 121, 78, 217, 218, 81, 23, 227, 25, 217, + /* 1580 */ 218, 131, 59, 153, 154, 155, 156, 157, 0, 1, + /* 1590 */ 2, 59, 98, 5, 71, 23, 227, 25, 10, 11, + /* 1600 */ 12, 13, 14, 83, 84, 17, 23, 23, 25, 25, + /* 1610 */ 59, 194, 194, 183, 23, 23, 25, 25, 30, 194, + /* 1620 */ 32, 19, 20, 100, 22, 194, 194, 133, 40, 106, + /* 1630 */ 107, 108, 138, 139, 194, 217, 218, 114, 36, 116, + /* 1640 */ 117, 118, 217, 218, 121, 194, 194, 194, 23, 117, + /* 1650 */ 25, 194, 23, 23, 25, 25, 162, 194, 70, 194, + /* 1660 */ 145, 59, 23, 153, 25, 155, 78, 194, 117, 81, + /* 1670 */ 217, 218, 194, 71, 217, 218, 153, 154, 155, 156, + /* 1680 */ 157, 194, 217, 218, 194, 23, 98, 25, 321, 194, + /* 1690 */ 217, 218, 194, 19, 20, 194, 22, 153, 23, 155, + /* 1700 */ 25, 194, 100, 194, 217, 218, 183, 194, 106, 107, + /* 1710 */ 36, 194, 217, 218, 237, 194, 114, 243, 116, 117, + /* 1720 */ 118, 133, 194, 121, 217, 218, 138, 139, 194, 194, + /* 1730 */ 194, 290, 289, 59, 217, 218, 194, 194, 217, 218, + /* 1740 */ 194, 194, 140, 194, 194, 71, 194, 244, 194, 194, + /* 1750 */ 162, 217, 218, 194, 194, 153, 154, 155, 
156, 157, + /* 1760 */ 217, 218, 194, 217, 218, 194, 217, 218, 257, 217, + /* 1770 */ 218, 217, 218, 257, 100, 194, 257, 217, 218, 257, + /* 1780 */ 106, 107, 215, 299, 194, 183, 192, 194, 114, 194, + /* 1790 */ 116, 117, 118, 1, 2, 121, 221, 5, 217, 218, + /* 1800 */ 273, 197, 10, 11, 12, 13, 14, 217, 218, 17, + /* 1810 */ 217, 218, 217, 218, 140, 194, 246, 194, 273, 295, + /* 1820 */ 247, 273, 30, 247, 32, 269, 269, 153, 154, 155, + /* 1830 */ 156, 157, 40, 246, 273, 295, 230, 226, 217, 218, + /* 1840 */ 217, 218, 220, 261, 220, 282, 220, 19, 20, 244, + /* 1850 */ 22, 250, 141, 250, 246, 60, 201, 183, 261, 261, + /* 1860 */ 261, 201, 70, 299, 36, 299, 201, 38, 151, 150, + /* 1870 */ 78, 285, 22, 81, 296, 296, 43, 235, 18, 238, + /* 1880 */ 201, 274, 272, 238, 238, 238, 18, 59, 200, 149, + /* 1890 */ 98, 247, 274, 274, 235, 247, 247, 247, 235, 71, + /* 1900 */ 272, 201, 200, 158, 292, 62, 291, 201, 200, 22, + /* 1910 */ 201, 222, 200, 222, 201, 200, 115, 219, 219, 64, + /* 1920 */ 219, 228, 22, 126, 221, 133, 165, 222, 100, 225, + /* 1930 */ 138, 139, 225, 219, 106, 107, 24, 219, 228, 219, + /* 1940 */ 219, 307, 114, 113, 116, 117, 118, 315, 284, 121, + /* 1950 */ 284, 222, 201, 91, 162, 320, 320, 82, 148, 267, + /* 1960 */ 145, 267, 22, 279, 201, 158, 281, 251, 147, 146, + /* 1970 */ 25, 203, 250, 249, 251, 248, 13, 247, 195, 195, + /* 1980 */ 6, 153, 154, 155, 156, 157, 193, 193, 305, 193, + /* 1990 */ 208, 305, 302, 214, 214, 214, 208, 223, 223, 214, + /* 2000 */ 4, 215, 215, 214, 3, 22, 208, 163, 15, 23, + /* 2010 */ 16, 183, 23, 139, 151, 130, 25, 20, 142, 24, + /* 2020 */ 16, 144, 1, 142, 130, 130, 61, 37, 53, 151, + /* 2030 */ 53, 53, 53, 130, 116, 1, 34, 141, 5, 22, + /* 2040 */ 115, 161, 75, 25, 68, 141, 41, 115, 68, 24, + /* 2050 */ 20, 19, 131, 125, 67, 67, 96, 22, 22, 22, + /* 2060 */ 37, 23, 22, 24, 22, 59, 67, 23, 149, 28, + /* 2070 */ 22, 25, 23, 23, 23, 22, 141, 34, 97, 23, + /* 2080 */ 23, 34, 116, 22, 143, 25, 34, 75, 34, 34, + /* 2090 */ 75, 88, 34, 86, 23, 22, 34, 25, 24, 34, + /* 2100 */ 25, 93, 23, 44, 142, 23, 142, 23, 23, 22, + /* 2110 */ 11, 25, 23, 25, 23, 22, 22, 22, 1, 23, + /* 2120 */ 23, 23, 22, 22, 15, 141, 141, 25, 25, 1, + /* 2130 */ 322, 322, 322, 135, 322, 322, 322, 322, 322, 322, + /* 2140 */ 322, 141, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2150 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2160 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2170 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2180 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2190 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2200 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2210 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2220 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2230 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2240 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2250 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2260 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2270 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2280 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2290 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2300 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2310 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2320 */ 322, 322, 322, 322, 322, 322, 322, 322, }; -#define YY_SHIFT_COUNT (578) +#define YY_SHIFT_COUNT (582) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX 
(2088) +#define YY_SHIFT_MAX (2128) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837, - /* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837, - /* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 30 */ 1837, 271, 271, 1219, 1219, 216, 88, 1, 1, 1, - /* 40 */ 1, 1, 40, 111, 258, 361, 469, 512, 583, 622, - /* 50 */ 693, 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, - /* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, - /* 80 */ 1662, 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 130 */ 1837, 137, 181, 181, 181, 181, 181, 181, 181, 94, - /* 140 */ 430, 66, 65, 112, 366, 533, 533, 740, 1257, 533, - /* 150 */ 533, 79, 79, 533, 412, 412, 412, 77, 412, 123, - /* 160 */ 113, 113, 113, 22, 22, 2100, 2100, 328, 328, 328, - /* 170 */ 239, 468, 468, 468, 468, 1015, 1015, 409, 366, 1187, - /* 180 */ 1232, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 200 */ 533, 969, 621, 621, 533, 642, 788, 788, 1133, 1133, - /* 210 */ 822, 822, 67, 1193, 2100, 2100, 2100, 2100, 2100, 2100, - /* 220 */ 2100, 1307, 954, 954, 585, 472, 640, 387, 695, 538, - /* 230 */ 541, 700, 533, 533, 533, 533, 533, 533, 533, 533, - /* 240 */ 533, 533, 222, 533, 533, 533, 533, 533, 533, 533, - /* 250 */ 533, 533, 533, 533, 533, 1213, 1213, 1213, 533, 533, - /* 260 */ 533, 565, 533, 533, 533, 916, 1147, 533, 533, 1288, - /* 270 */ 533, 533, 533, 533, 533, 533, 533, 533, 639, 1280, - /* 280 */ 209, 1129, 1129, 1129, 1129, 580, 209, 209, 1209, 768, - /* 290 */ 917, 649, 1315, 1334, 405, 1334, 1383, 249, 1315, 1315, - /* 300 */ 249, 1315, 405, 1383, 1441, 464, 1245, 1417, 1417, 1417, - /* 310 */ 1323, 1323, 1323, 1323, 184, 184, 1335, 1476, 856, 1482, - /* 320 */ 1744, 1744, 1665, 1665, 1773, 1773, 1665, 1669, 1671, 1802, - /* 330 */ 1782, 1809, 1809, 1809, 1809, 1665, 1817, 1690, 1671, 1671, - /* 340 */ 1690, 1802, 1782, 1690, 1782, 1690, 1665, 1817, 1693, 1791, - /* 350 */ 1665, 1817, 1832, 1665, 1817, 1665, 1817, 1832, 1752, 1752, - /* 360 */ 1752, 1805, 1850, 1850, 1832, 1752, 1749, 1752, 1805, 1752, - /* 370 */ 1752, 1713, 1858, 1775, 1775, 1832, 1665, 1799, 1799, 1823, - /* 380 */ 1823, 1761, 1765, 1890, 1665, 1757, 1761, 1771, 1774, 1690, - /* 390 */ 1894, 1913, 1913, 1929, 1929, 1929, 2100, 2100, 2100, 2100, - /* 400 */ 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, - /* 410 */ 2100, 207, 1220, 331, 620, 967, 806, 1074, 1499, 1432, - /* 420 */ 1463, 1479, 1419, 1422, 1557, 1512, 1598, 1599, 1644, 1645, - /* 430 */ 1654, 1660, 1555, 1505, 1684, 1462, 1670, 1563, 1619, 1593, - /* 440 */ 1676, 1679, 1613, 1680, 1554, 1558, 1689, 1692, 1605, 1589, - /* 450 */ 1955, 1959, 1941, 1803, 1950, 1951, 1945, 1946, 1831, 1820, - /* 460 */ 1842, 1948, 1948, 1952, 1833, 1954, 1834, 1961, 1978, 1838, - /* 470 */ 1851, 1948, 1852, 1922, 1947, 1948, 1836, 1932, 1935, 1936, - /* 480 */ 1942, 1866, 1881, 1964, 1859, 1998, 1996, 1980, 1888, 1843, - /* 490 */ 1937, 1981, 1939, 1933, 1968, 1869, 1896, 1988, 1993, 1995, - /* 500 */ 1884, 1891, 1997, 1953, 1999, 2000, 1994, 2001, 1957, 1966, - /* 510 
*/ 2002, 1931, 1990, 2006, 1962, 2003, 2007, 2004, 1882, 2010, - /* 520 */ 2011, 2012, 2008, 2013, 2015, 1944, 1898, 2019, 2020, 1928, - /* 530 */ 2014, 2023, 1903, 2022, 2016, 2017, 2018, 2021, 1965, 1974, - /* 540 */ 1970, 2024, 1979, 1967, 2025, 2034, 2036, 2037, 2038, 2039, - /* 550 */ 2028, 1923, 1924, 2044, 2022, 2046, 2047, 2048, 2049, 2050, - /* 560 */ 2051, 2054, 2062, 2055, 2056, 2057, 2058, 2060, 2061, 2059, - /* 570 */ 1956, 1938, 1949, 1958, 2063, 2064, 2070, 2085, 2088, + /* 0 */ 1792, 1588, 1494, 322, 322, 399, 306, 1319, 1339, 1430, + /* 10 */ 1828, 1828, 1828, 580, 399, 399, 399, 399, 399, 0, + /* 20 */ 0, 214, 1093, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 30 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1130, 1130, + /* 40 */ 365, 365, 55, 278, 436, 713, 713, 201, 201, 201, + /* 50 */ 201, 40, 111, 258, 361, 469, 512, 583, 622, 693, + /* 60 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, + /* 70 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, + /* 80 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1523, 1602, + /* 90 */ 1674, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 100 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 110 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 120 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 130 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 140 */ 137, 181, 181, 181, 181, 181, 181, 181, 96, 222, + /* 150 */ 143, 477, 713, 1133, 1268, 713, 713, 79, 79, 713, + /* 160 */ 770, 83, 65, 65, 65, 288, 162, 162, 2142, 2142, + /* 170 */ 696, 696, 696, 238, 474, 474, 474, 474, 1217, 1217, + /* 180 */ 678, 477, 324, 398, 713, 713, 713, 713, 713, 713, + /* 190 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, + /* 200 */ 713, 713, 713, 1220, 366, 366, 713, 917, 283, 283, + /* 210 */ 434, 434, 605, 605, 1298, 2142, 2142, 2142, 2142, 2142, + /* 220 */ 2142, 2142, 1179, 1157, 1157, 487, 527, 585, 645, 749, + /* 230 */ 914, 968, 752, 713, 713, 713, 713, 713, 713, 713, + /* 240 */ 713, 713, 713, 303, 713, 713, 713, 713, 713, 713, + /* 250 */ 713, 713, 713, 713, 713, 713, 797, 797, 797, 713, + /* 260 */ 713, 713, 959, 713, 713, 713, 1169, 1271, 713, 713, + /* 270 */ 1330, 713, 713, 713, 713, 713, 713, 713, 713, 629, + /* 280 */ 7, 91, 876, 876, 876, 876, 953, 91, 91, 1246, + /* 290 */ 1065, 1106, 1374, 1329, 1348, 468, 1348, 1394, 785, 1329, + /* 300 */ 1329, 785, 1329, 468, 1394, 859, 854, 1402, 1449, 1449, + /* 310 */ 1449, 1173, 1173, 1173, 1173, 1355, 1355, 1030, 1341, 405, + /* 320 */ 1230, 1795, 1795, 1711, 1711, 1829, 1829, 1711, 1717, 1719, + /* 330 */ 1850, 1833, 1860, 1860, 1860, 1860, 1711, 1868, 1740, 1719, + /* 340 */ 1719, 1740, 1850, 1833, 1740, 1833, 1740, 1711, 1868, 1745, + /* 350 */ 1843, 1711, 1868, 1887, 1711, 1868, 1711, 1868, 1887, 1801, + /* 360 */ 1801, 1801, 1855, 1900, 1900, 1887, 1801, 1797, 1801, 1855, + /* 370 */ 1801, 1801, 1761, 1912, 1830, 1830, 1887, 1711, 1862, 1862, + /* 380 */ 1875, 1875, 1810, 1815, 1940, 1711, 1807, 1810, 1821, 1823, + /* 390 */ 1740, 1945, 1963, 1963, 1974, 1974, 1974, 2142, 2142, 2142, + /* 400 */ 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, + /* 410 */ 2142, 2142, 20, 1224, 256, 1111, 1115, 1114, 1192, 1496, + /* 420 */ 1424, 1505, 1427, 355, 1383, 1537, 1506, 1538, 1553, 1583, + /* 430 */ 1584, 1591, 1625, 541, 1445, 1562, 1450, 1572, 1515, 1428, + /* 440 */ 1532, 1592, 1629, 1520, 1630, 1639, 1510, 1544, 1662, 1675, + /* 450 */ 1551, 48, 1996, 2001, 
1983, 1844, 1993, 1994, 1986, 1989, + /* 460 */ 1874, 1863, 1885, 1991, 1991, 1995, 1876, 1997, 1877, 2004, + /* 470 */ 2021, 1881, 1894, 1991, 1895, 1965, 1990, 1991, 1878, 1975, + /* 480 */ 1977, 1978, 1979, 1903, 1918, 2002, 1896, 2034, 2033, 2017, + /* 490 */ 1925, 1880, 1976, 2018, 1980, 1967, 2005, 1904, 1932, 2025, + /* 500 */ 2030, 2032, 1921, 1928, 2035, 1987, 2036, 2037, 2038, 2040, + /* 510 */ 1988, 2006, 2039, 1960, 2041, 2042, 1999, 2023, 2044, 2043, + /* 520 */ 1919, 2048, 2049, 2050, 2046, 2051, 2053, 1981, 1935, 2056, + /* 530 */ 2057, 1966, 2047, 2061, 1941, 2060, 2052, 2054, 2055, 2058, + /* 540 */ 2003, 2012, 2007, 2059, 2015, 2008, 2062, 2071, 2073, 2074, + /* 550 */ 2072, 2075, 2065, 1962, 1964, 2079, 2060, 2082, 2084, 2085, + /* 560 */ 2087, 2086, 2089, 2088, 2091, 2093, 2099, 2094, 2095, 2096, + /* 570 */ 2097, 2100, 2101, 2102, 1998, 1984, 1985, 2000, 2103, 2098, + /* 580 */ 2109, 2117, 2128, }; -#define YY_REDUCE_COUNT (410) -#define YY_REDUCE_MIN (-271) -#define YY_REDUCE_MAX (1753) +#define YY_REDUCE_COUNT (411) +#define YY_REDUCE_MIN (-275) +#define YY_REDUCE_MAX (1798) static const short yy_reduce_ofst[] = { - /* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187, - /* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489, - /* 20 */ 576, 598, -175, 686, 860, 615, 725, 1014, 778, 781, - /* 30 */ 857, 616, 887, 87, 240, -192, 408, 626, 796, 843, - /* 40 */ 854, 1004, -271, -271, -271, -271, -271, -271, -271, -271, - /* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, 80, - /* 80 */ 83, 313, 886, 888, 918, 938, 1021, 1034, 1036, 1141, - /* 90 */ 1159, 1163, 1166, 1168, 1170, 1176, 1178, 1180, 1184, 1196, - /* 100 */ 1198, 1205, 1215, 1225, 1227, 1236, 1252, 1254, 1264, 1303, - /* 110 */ 1309, 1312, 1322, 1325, 1328, 1337, 1340, 1343, 1353, 1371, - /* 120 */ 1373, 1384, 1386, 1411, 1413, 1420, 1424, 1426, 1458, 1470, - /* 130 */ 1473, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 140 */ -271, -271, 138, 459, 396, -158, 470, 302, -212, 521, - /* 150 */ 201, -195, -92, 559, 630, 632, 630, -271, 632, 901, - /* 160 */ 63, 407, 670, -271, -271, -271, -271, 161, 161, 161, - /* 170 */ 251, 335, 847, 979, 1097, 537, 588, 618, 628, 688, - /* 180 */ 688, -166, -161, 674, 787, 794, 799, 852, 996, -122, - /* 190 */ 837, -120, 1018, 1035, 415, 1047, 1001, 958, 1082, 400, - /* 200 */ 1099, 779, 1137, 1142, 263, 1083, 1145, 1150, 1041, 1139, - /* 210 */ 965, 1050, 362, 849, 752, 629, 675, 1162, 1173, 1090, - /* 220 */ 1195, -194, 56, 185, -135, 232, 522, 560, 571, 601, - /* 230 */ 617, 669, 683, 711, 850, 893, 1000, 1040, 1049, 1081, - /* 240 */ 1087, 1101, 392, 1114, 1123, 1155, 1161, 1175, 1271, 1293, - /* 250 */ 1299, 1330, 1339, 1342, 1347, 593, 1282, 1286, 1350, 1359, - /* 260 */ 1368, 1314, 1480, 1483, 1507, 1085, 1338, 1526, 1527, 1487, - /* 270 */ 1531, 560, 1532, 1534, 1535, 1538, 1539, 1541, 1448, 1450, - /* 280 */ 1496, 1484, 1485, 1489, 1490, 1314, 1496, 1496, 1504, 1536, - /* 290 */ 1564, 1451, 1486, 1492, 1509, 1493, 1465, 1515, 1494, 1495, - /* 300 */ 1517, 1500, 1519, 1474, 1550, 1543, 1548, 1556, 1565, 1566, - /* 310 */ 1518, 1523, 1542, 1544, 1525, 1545, 1513, 1553, 1552, 1604, - /* 320 */ 1508, 1510, 1608, 1609, 1520, 1528, 1612, 1540, 1559, 1560, - /* 330 */ 1592, 1591, 1595, 1596, 1597, 1629, 1638, 1594, 1569, 1570, - /* 340 */ 1600, 1568, 1610, 1601, 1611, 1603, 1643, 1651, 1562, 
1571, - /* 350 */ 1655, 1659, 1640, 1663, 1666, 1664, 1667, 1641, 1650, 1652, - /* 360 */ 1653, 1647, 1656, 1657, 1658, 1668, 1672, 1681, 1649, 1682, - /* 370 */ 1683, 1573, 1582, 1607, 1615, 1685, 1702, 1586, 1587, 1642, - /* 380 */ 1646, 1673, 1675, 1636, 1714, 1637, 1677, 1674, 1678, 1694, - /* 390 */ 1719, 1734, 1735, 1746, 1747, 1750, 1633, 1661, 1686, 1738, - /* 400 */ 1728, 1733, 1736, 1737, 1740, 1726, 1730, 1742, 1743, 1748, - /* 410 */ 1753, + /* 0 */ -71, 194, 343, 835, -180, -177, 838, -194, -188, -185, + /* 10 */ -183, 82, 183, -65, 133, 245, 346, 407, 458, -178, + /* 20 */ 75, -275, -4, 310, 312, 489, 575, 596, 463, 686, + /* 30 */ 707, 725, 780, 1098, 856, 778, 1059, 1090, 708, 887, + /* 40 */ 86, 448, 980, 630, 680, 681, 684, 796, 801, 796, + /* 50 */ 801, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 60 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 70 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 80 */ -261, -261, -261, -261, -261, -261, -261, -261, 391, 886, + /* 90 */ 888, 1013, 1016, 1081, 1087, 1151, 1159, 1177, 1185, 1188, + /* 100 */ 1190, 1194, 1197, 1203, 1247, 1260, 1264, 1267, 1269, 1273, + /* 110 */ 1315, 1322, 1335, 1337, 1356, 1362, 1418, 1425, 1453, 1457, + /* 120 */ 1465, 1473, 1487, 1495, 1507, 1517, 1521, 1534, 1543, 1546, + /* 130 */ 1549, 1552, 1554, 1560, 1581, 1590, 1593, 1595, 1621, 1623, + /* 140 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 150 */ -261, -186, -117, 260, 263, 460, 631, -74, 497, -181, + /* 160 */ -261, 939, 176, 274, 338, 676, -261, -261, -261, -261, + /* 170 */ -212, -212, -212, -184, 149, 777, 1061, 1103, 265, 419, + /* 180 */ -254, 670, 677, 677, -11, -129, 184, 488, 736, 789, + /* 190 */ 805, 844, 403, 529, 579, 668, 783, 841, 1158, 1112, + /* 200 */ 806, 861, 1095, 846, 839, 1031, -189, 1077, 1080, 1116, + /* 210 */ 1084, 1156, 1139, 1221, 46, 1099, 1037, 1118, 1171, 1214, + /* 220 */ 1210, 1258, -210, -190, -176, -115, 117, 262, 376, 490, + /* 230 */ 511, 520, 618, 639, 743, 901, 907, 958, 1014, 1055, + /* 240 */ 1108, 1193, 1244, 720, 1248, 1277, 1324, 1347, 1417, 1431, + /* 250 */ 1432, 1440, 1451, 1452, 1463, 1478, 1286, 1350, 1369, 1490, + /* 260 */ 1498, 1501, 773, 1509, 1513, 1528, 1292, 1367, 1535, 1536, + /* 270 */ 1477, 1542, 376, 1547, 1550, 1555, 1559, 1568, 1571, 1441, + /* 280 */ 1443, 1474, 1511, 1516, 1519, 1522, 773, 1474, 1474, 1503, + /* 290 */ 1567, 1594, 1484, 1527, 1556, 1570, 1557, 1524, 1573, 1545, + /* 300 */ 1548, 1576, 1561, 1587, 1540, 1575, 1606, 1611, 1622, 1624, + /* 310 */ 1626, 1582, 1597, 1598, 1599, 1601, 1603, 1563, 1608, 1605, + /* 320 */ 1604, 1564, 1566, 1655, 1660, 1578, 1579, 1665, 1586, 1607, + /* 330 */ 1610, 1642, 1641, 1645, 1646, 1647, 1679, 1688, 1644, 1618, + /* 340 */ 1619, 1648, 1628, 1659, 1649, 1663, 1650, 1700, 1702, 1612, + /* 350 */ 1615, 1706, 1708, 1689, 1709, 1712, 1713, 1715, 1691, 1698, + /* 360 */ 1699, 1701, 1693, 1704, 1707, 1705, 1714, 1703, 1718, 1710, + /* 370 */ 1720, 1721, 1632, 1634, 1664, 1666, 1729, 1751, 1635, 1636, + /* 380 */ 1692, 1694, 1716, 1722, 1684, 1763, 1685, 1723, 1724, 1727, + /* 390 */ 1730, 1768, 1783, 1784, 1793, 1794, 1796, 1683, 1686, 1690, + /* 400 */ 1782, 1779, 1780, 1781, 1785, 1788, 1774, 1775, 1786, 1787, + /* 410 */ 1789, 1798, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1648, 1648, 1648, 1478, 1243, 1354, 1243, 1243, 1243, 1478, - /* 10 */ 1478, 1478, 1243, 1384, 1384, 1531, 1276, 1243, 1243, 1243, - /* 20 */ 1243, 1243, 1243, 1243, 1243, 1243, 
1243, 1243, 1477, 1243, - /* 30 */ 1243, 1243, 1243, 1564, 1564, 1243, 1243, 1243, 1243, 1243, - /* 40 */ 1243, 1243, 1243, 1393, 1243, 1400, 1243, 1243, 1243, 1243, - /* 50 */ 1243, 1479, 1480, 1243, 1243, 1243, 1530, 1532, 1495, 1407, - /* 60 */ 1406, 1405, 1404, 1513, 1372, 1398, 1391, 1395, 1474, 1475, - /* 70 */ 1473, 1626, 1480, 1479, 1243, 1394, 1442, 1458, 1441, 1243, - /* 80 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 90 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 100 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 110 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 120 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 130 */ 1243, 1450, 1457, 1456, 1455, 1464, 1454, 1451, 1444, 1443, - /* 140 */ 1445, 1446, 1243, 1243, 1267, 1243, 1243, 1264, 1318, 1243, - /* 150 */ 1243, 1243, 1243, 1243, 1550, 1549, 1243, 1447, 1243, 1276, - /* 160 */ 1435, 1434, 1433, 1461, 1448, 1460, 1459, 1538, 1600, 1599, - /* 170 */ 1496, 1243, 1243, 1243, 1243, 1243, 1243, 1564, 1243, 1243, - /* 180 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 190 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 200 */ 1243, 1374, 1564, 1564, 1243, 1276, 1564, 1564, 1375, 1375, - /* 210 */ 1272, 1272, 1378, 1243, 1545, 1345, 1345, 1345, 1345, 1354, - /* 220 */ 1345, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 230 */ 1243, 1243, 1243, 1243, 1243, 1243, 1535, 1533, 1243, 1243, - /* 240 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 250 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 260 */ 1243, 1243, 1243, 1243, 1243, 1350, 1243, 1243, 1243, 1243, - /* 270 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1593, 1243, 1508, - /* 280 */ 1332, 1350, 1350, 1350, 1350, 1352, 1333, 1331, 1344, 1277, - /* 290 */ 1250, 1640, 1410, 1399, 1351, 1399, 1637, 1397, 1410, 1410, - /* 300 */ 1397, 1410, 1351, 1637, 1293, 1615, 1288, 1384, 1384, 1384, - /* 310 */ 1374, 1374, 1374, 1374, 1378, 1378, 1476, 1351, 1344, 1243, - /* 320 */ 1640, 1640, 1360, 1360, 1639, 1639, 1360, 1496, 1623, 1419, - /* 330 */ 1321, 1327, 1327, 1327, 1327, 1360, 1261, 1397, 1623, 1623, - /* 340 */ 1397, 1419, 1321, 1397, 1321, 1397, 1360, 1261, 1512, 1634, - /* 350 */ 1360, 1261, 1486, 1360, 1261, 1360, 1261, 1486, 1319, 1319, - /* 360 */ 1319, 1308, 1243, 1243, 1486, 1319, 1293, 1319, 1308, 1319, - /* 370 */ 1319, 1582, 1243, 1490, 1490, 1486, 1360, 1574, 1574, 1387, - /* 380 */ 1387, 1392, 1378, 1481, 1360, 1243, 1392, 1390, 1388, 1397, - /* 390 */ 1311, 1596, 1596, 1592, 1592, 1592, 1645, 1645, 1545, 1608, - /* 400 */ 1276, 1276, 1276, 1276, 1608, 1295, 1295, 1277, 1277, 1276, - /* 410 */ 1608, 1243, 1243, 1243, 1243, 1243, 1243, 1603, 1243, 1540, - /* 420 */ 1497, 1364, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 430 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1551, 1243, - /* 440 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1424, - /* 450 */ 1243, 1246, 1542, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 460 */ 1243, 1401, 1402, 1365, 1243, 1243, 1243, 1243, 1243, 1243, - /* 470 */ 1243, 1416, 1243, 1243, 1243, 1411, 1243, 1243, 1243, 1243, - /* 480 */ 1243, 1243, 1243, 1243, 1636, 1243, 1243, 1243, 1243, 1243, - /* 490 */ 1243, 1511, 1510, 1243, 1243, 1362, 1243, 1243, 1243, 1243, - /* 500 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1291, - /* 510 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 520 
*/ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, - /* 530 */ 1243, 1243, 1243, 1389, 1243, 1243, 1243, 1243, 1243, 1243, - /* 540 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1579, 1379, - /* 550 */ 1243, 1243, 1243, 1243, 1627, 1243, 1243, 1243, 1243, 1243, - /* 560 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1619, - /* 570 */ 1335, 1425, 1243, 1428, 1265, 1243, 1255, 1243, 1243, + /* 0 */ 1663, 1663, 1663, 1491, 1254, 1367, 1254, 1254, 1254, 1254, + /* 10 */ 1491, 1491, 1491, 1254, 1254, 1254, 1254, 1254, 1254, 1397, + /* 20 */ 1397, 1544, 1287, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 30 */ 1254, 1254, 1254, 1254, 1254, 1490, 1254, 1254, 1254, 1254, + /* 40 */ 1578, 1578, 1254, 1254, 1254, 1254, 1254, 1563, 1562, 1254, + /* 50 */ 1254, 1254, 1406, 1254, 1413, 1254, 1254, 1254, 1254, 1254, + /* 60 */ 1492, 1493, 1254, 1254, 1254, 1543, 1545, 1508, 1420, 1419, + /* 70 */ 1418, 1417, 1526, 1385, 1411, 1404, 1408, 1487, 1488, 1486, + /* 80 */ 1641, 1493, 1492, 1254, 1407, 1455, 1471, 1454, 1254, 1254, + /* 90 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 100 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 110 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 120 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 130 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 140 */ 1463, 1470, 1469, 1468, 1477, 1467, 1464, 1457, 1456, 1458, + /* 150 */ 1459, 1278, 1254, 1275, 1329, 1254, 1254, 1254, 1254, 1254, + /* 160 */ 1460, 1287, 1448, 1447, 1446, 1254, 1474, 1461, 1473, 1472, + /* 170 */ 1551, 1615, 1614, 1509, 1254, 1254, 1254, 1254, 1254, 1254, + /* 180 */ 1578, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 190 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 200 */ 1254, 1254, 1254, 1387, 1578, 1578, 1254, 1287, 1578, 1578, + /* 210 */ 1388, 1388, 1283, 1283, 1391, 1558, 1358, 1358, 1358, 1358, + /* 220 */ 1367, 1358, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 230 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1548, 1546, 1254, + /* 240 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 250 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 260 */ 1254, 1254, 1254, 1254, 1254, 1254, 1363, 1254, 1254, 1254, + /* 270 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1608, 1254, + /* 280 */ 1521, 1343, 1363, 1363, 1363, 1363, 1365, 1344, 1342, 1357, + /* 290 */ 1288, 1261, 1655, 1423, 1412, 1364, 1412, 1652, 1410, 1423, + /* 300 */ 1423, 1410, 1423, 1364, 1652, 1304, 1630, 1299, 1397, 1397, + /* 310 */ 1397, 1387, 1387, 1387, 1387, 1391, 1391, 1489, 1364, 1357, + /* 320 */ 1254, 1655, 1655, 1373, 1373, 1654, 1654, 1373, 1509, 1638, + /* 330 */ 1432, 1332, 1338, 1338, 1338, 1338, 1373, 1272, 1410, 1638, + /* 340 */ 1638, 1410, 1432, 1332, 1410, 1332, 1410, 1373, 1272, 1525, + /* 350 */ 1649, 1373, 1272, 1499, 1373, 1272, 1373, 1272, 1499, 1330, + /* 360 */ 1330, 1330, 1319, 1254, 1254, 1499, 1330, 1304, 1330, 1319, + /* 370 */ 1330, 1330, 1596, 1254, 1503, 1503, 1499, 1373, 1588, 1588, + /* 380 */ 1400, 1400, 1405, 1391, 1494, 1373, 1254, 1405, 1403, 1401, + /* 390 */ 1410, 1322, 1611, 1611, 1607, 1607, 1607, 1660, 1660, 1558, + /* 400 */ 1623, 1287, 1287, 1287, 1287, 1623, 1306, 1306, 1288, 1288, + /* 410 */ 1287, 1623, 1254, 1254, 1254, 1254, 1254, 1254, 1618, 1254, + /* 420 */ 1553, 1510, 1377, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 430 */ 1254, 1254, 1254, 1254, 1254, 1254, 
1254, 1254, 1254, 1564, + /* 440 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 450 */ 1254, 1437, 1254, 1257, 1555, 1254, 1254, 1254, 1254, 1254, + /* 460 */ 1254, 1254, 1254, 1414, 1415, 1378, 1254, 1254, 1254, 1254, + /* 470 */ 1254, 1254, 1254, 1429, 1254, 1254, 1254, 1424, 1254, 1254, + /* 480 */ 1254, 1254, 1254, 1254, 1254, 1254, 1651, 1254, 1254, 1254, + /* 490 */ 1254, 1254, 1254, 1524, 1523, 1254, 1254, 1375, 1254, 1254, + /* 500 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 510 */ 1254, 1302, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 520 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 530 */ 1254, 1254, 1254, 1254, 1254, 1402, 1254, 1254, 1254, 1254, + /* 540 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 550 */ 1593, 1392, 1254, 1254, 1254, 1254, 1642, 1254, 1254, 1254, + /* 560 */ 1254, 1352, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 570 */ 1254, 1254, 1254, 1634, 1346, 1438, 1254, 1441, 1276, 1254, + /* 580 */ 1266, 1254, 1254, }; /********** End of lemon-generated parsing tables *****************************/ @@ -172302,8 +173893,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* TRUEFALSE => nothing */ 0, /* ISNOT => nothing */ 0, /* FUNCTION => nothing */ - 0, /* UMINUS => nothing */ 0, /* UPLUS => nothing */ + 0, /* UMINUS => nothing */ 0, /* TRUTH => nothing */ 0, /* REGISTER => nothing */ 0, /* VECTOR => nothing */ @@ -172312,6 +173903,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* ASTERISK => nothing */ 0, /* SPAN => nothing */ 0, /* ERROR => nothing */ + 0, /* QNUMBER => nothing */ 0, /* SPACE => nothing */ 0, /* ILLEGAL => nothing */ }; @@ -172354,14 +173946,9 @@ struct yyParser { #endif sqlite3ParserARG_SDECL /* A place to hold %extra_argument */ sqlite3ParserCTX_SDECL /* A place to hold %extra_context */ -#if YYSTACKDEPTH<=0 - int yystksz; /* Current side of the stack */ - yyStackEntry *yystack; /* The parser's stack */ - yyStackEntry yystk0; /* First stack entry */ -#else - yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ - yyStackEntry *yystackEnd; /* Last entry in the stack */ -#endif + yyStackEntry *yystackEnd; /* Last entry in the stack */ + yyStackEntry *yystack; /* The parser stack */ + yyStackEntry yystk0[YYSTACKDEPTH]; /* Initial stack space */ }; typedef struct yyParser yyParser; @@ -172575,8 +174162,8 @@ static const char *const yyTokenName[] = { /* 170 */ "TRUEFALSE", /* 171 */ "ISNOT", /* 172 */ "FUNCTION", - /* 173 */ "UMINUS", - /* 174 */ "UPLUS", + /* 173 */ "UPLUS", + /* 174 */ "UMINUS", /* 175 */ "TRUTH", /* 176 */ "REGISTER", /* 177 */ "VECTOR", @@ -172585,142 +174172,145 @@ static const char *const yyTokenName[] = { /* 180 */ "ASTERISK", /* 181 */ "SPAN", /* 182 */ "ERROR", - /* 183 */ "SPACE", - /* 184 */ "ILLEGAL", - /* 185 */ "input", - /* 186 */ "cmdlist", - /* 187 */ "ecmd", - /* 188 */ "cmdx", - /* 189 */ "explain", - /* 190 */ "cmd", - /* 191 */ "transtype", - /* 192 */ "trans_opt", - /* 193 */ "nm", - /* 194 */ "savepoint_opt", - /* 195 */ "create_table", - /* 196 */ "create_table_args", - /* 197 */ "createkw", - /* 198 */ "temp", - /* 199 */ "ifnotexists", - /* 200 */ "dbnm", - /* 201 */ "columnlist", - /* 202 */ "conslist_opt", - /* 203 */ "table_option_set", - /* 204 */ "select", - /* 205 */ "table_option", - /* 206 */ "columnname", - /* 207 */ "carglist", - /* 208 */ "typetoken", - /* 209 */ "typename", - /* 210 */ "signed", - /* 211 */ "plus_num", - /* 212 */ "minus_num", - /* 213 */ "scanpt", - /* 214 */ 
"scantok", - /* 215 */ "ccons", - /* 216 */ "term", - /* 217 */ "expr", - /* 218 */ "onconf", - /* 219 */ "sortorder", - /* 220 */ "autoinc", - /* 221 */ "eidlist_opt", - /* 222 */ "refargs", - /* 223 */ "defer_subclause", - /* 224 */ "generated", - /* 225 */ "refarg", - /* 226 */ "refact", - /* 227 */ "init_deferred_pred_opt", - /* 228 */ "conslist", - /* 229 */ "tconscomma", - /* 230 */ "tcons", - /* 231 */ "sortlist", - /* 232 */ "eidlist", - /* 233 */ "defer_subclause_opt", - /* 234 */ "orconf", - /* 235 */ "resolvetype", - /* 236 */ "raisetype", - /* 237 */ "ifexists", - /* 238 */ "fullname", - /* 239 */ "selectnowith", - /* 240 */ "oneselect", - /* 241 */ "wqlist", - /* 242 */ "multiselect_op", - /* 243 */ "distinct", - /* 244 */ "selcollist", - /* 245 */ "from", - /* 246 */ "where_opt", - /* 247 */ "groupby_opt", - /* 248 */ "having_opt", - /* 249 */ "orderby_opt", - /* 250 */ "limit_opt", - /* 251 */ "window_clause", - /* 252 */ "values", - /* 253 */ "nexprlist", - /* 254 */ "sclp", - /* 255 */ "as", - /* 256 */ "seltablist", - /* 257 */ "stl_prefix", - /* 258 */ "joinop", - /* 259 */ "on_using", - /* 260 */ "indexed_by", - /* 261 */ "exprlist", - /* 262 */ "xfullname", - /* 263 */ "idlist", - /* 264 */ "indexed_opt", - /* 265 */ "nulls", - /* 266 */ "with", - /* 267 */ "where_opt_ret", - /* 268 */ "setlist", - /* 269 */ "insert_cmd", - /* 270 */ "idlist_opt", - /* 271 */ "upsert", - /* 272 */ "returning", - /* 273 */ "filter_over", - /* 274 */ "likeop", - /* 275 */ "between_op", - /* 276 */ "in_op", - /* 277 */ "paren_exprlist", - /* 278 */ "case_operand", - /* 279 */ "case_exprlist", - /* 280 */ "case_else", - /* 281 */ "uniqueflag", - /* 282 */ "collate", - /* 283 */ "vinto", - /* 284 */ "nmnum", - /* 285 */ "trigger_decl", - /* 286 */ "trigger_cmd_list", - /* 287 */ "trigger_time", - /* 288 */ "trigger_event", - /* 289 */ "foreach_clause", - /* 290 */ "when_clause", - /* 291 */ "trigger_cmd", - /* 292 */ "trnm", - /* 293 */ "tridxby", - /* 294 */ "database_kw_opt", - /* 295 */ "key_opt", - /* 296 */ "add_column_fullname", - /* 297 */ "kwcolumn_opt", - /* 298 */ "create_vtab", - /* 299 */ "vtabarglist", - /* 300 */ "vtabarg", - /* 301 */ "vtabargtoken", - /* 302 */ "lp", - /* 303 */ "anylist", - /* 304 */ "wqitem", - /* 305 */ "wqas", - /* 306 */ "windowdefn_list", - /* 307 */ "windowdefn", - /* 308 */ "window", - /* 309 */ "frame_opt", - /* 310 */ "part_opt", - /* 311 */ "filter_clause", - /* 312 */ "over_clause", - /* 313 */ "range_or_rows", - /* 314 */ "frame_bound", - /* 315 */ "frame_bound_s", - /* 316 */ "frame_bound_e", - /* 317 */ "frame_exclude_opt", - /* 318 */ "frame_exclude", + /* 183 */ "QNUMBER", + /* 184 */ "SPACE", + /* 185 */ "ILLEGAL", + /* 186 */ "input", + /* 187 */ "cmdlist", + /* 188 */ "ecmd", + /* 189 */ "cmdx", + /* 190 */ "explain", + /* 191 */ "cmd", + /* 192 */ "transtype", + /* 193 */ "trans_opt", + /* 194 */ "nm", + /* 195 */ "savepoint_opt", + /* 196 */ "create_table", + /* 197 */ "create_table_args", + /* 198 */ "createkw", + /* 199 */ "temp", + /* 200 */ "ifnotexists", + /* 201 */ "dbnm", + /* 202 */ "columnlist", + /* 203 */ "conslist_opt", + /* 204 */ "table_option_set", + /* 205 */ "select", + /* 206 */ "table_option", + /* 207 */ "columnname", + /* 208 */ "carglist", + /* 209 */ "typetoken", + /* 210 */ "typename", + /* 211 */ "signed", + /* 212 */ "plus_num", + /* 213 */ "minus_num", + /* 214 */ "scanpt", + /* 215 */ "scantok", + /* 216 */ "ccons", + /* 217 */ "term", + /* 218 */ "expr", + /* 219 */ "onconf", + /* 220 */ "sortorder", + /* 
221 */ "autoinc", + /* 222 */ "eidlist_opt", + /* 223 */ "refargs", + /* 224 */ "defer_subclause", + /* 225 */ "generated", + /* 226 */ "refarg", + /* 227 */ "refact", + /* 228 */ "init_deferred_pred_opt", + /* 229 */ "conslist", + /* 230 */ "tconscomma", + /* 231 */ "tcons", + /* 232 */ "sortlist", + /* 233 */ "eidlist", + /* 234 */ "defer_subclause_opt", + /* 235 */ "orconf", + /* 236 */ "resolvetype", + /* 237 */ "raisetype", + /* 238 */ "ifexists", + /* 239 */ "fullname", + /* 240 */ "selectnowith", + /* 241 */ "oneselect", + /* 242 */ "wqlist", + /* 243 */ "multiselect_op", + /* 244 */ "distinct", + /* 245 */ "selcollist", + /* 246 */ "from", + /* 247 */ "where_opt", + /* 248 */ "groupby_opt", + /* 249 */ "having_opt", + /* 250 */ "orderby_opt", + /* 251 */ "limit_opt", + /* 252 */ "window_clause", + /* 253 */ "values", + /* 254 */ "nexprlist", + /* 255 */ "mvalues", + /* 256 */ "sclp", + /* 257 */ "as", + /* 258 */ "seltablist", + /* 259 */ "stl_prefix", + /* 260 */ "joinop", + /* 261 */ "on_using", + /* 262 */ "indexed_by", + /* 263 */ "exprlist", + /* 264 */ "xfullname", + /* 265 */ "idlist", + /* 266 */ "indexed_opt", + /* 267 */ "nulls", + /* 268 */ "with", + /* 269 */ "where_opt_ret", + /* 270 */ "setlist", + /* 271 */ "insert_cmd", + /* 272 */ "idlist_opt", + /* 273 */ "upsert", + /* 274 */ "returning", + /* 275 */ "filter_over", + /* 276 */ "likeop", + /* 277 */ "between_op", + /* 278 */ "in_op", + /* 279 */ "paren_exprlist", + /* 280 */ "case_operand", + /* 281 */ "case_exprlist", + /* 282 */ "case_else", + /* 283 */ "uniqueflag", + /* 284 */ "collate", + /* 285 */ "vinto", + /* 286 */ "nmnum", + /* 287 */ "trigger_decl", + /* 288 */ "trigger_cmd_list", + /* 289 */ "trigger_time", + /* 290 */ "trigger_event", + /* 291 */ "foreach_clause", + /* 292 */ "when_clause", + /* 293 */ "trigger_cmd", + /* 294 */ "trnm", + /* 295 */ "tridxby", + /* 296 */ "database_kw_opt", + /* 297 */ "key_opt", + /* 298 */ "add_column_fullname", + /* 299 */ "kwcolumn_opt", + /* 300 */ "create_vtab", + /* 301 */ "vtabarglist", + /* 302 */ "vtabarg", + /* 303 */ "vtabargtoken", + /* 304 */ "lp", + /* 305 */ "anylist", + /* 306 */ "wqitem", + /* 307 */ "wqas", + /* 308 */ "withnm", + /* 309 */ "windowdefn_list", + /* 310 */ "windowdefn", + /* 311 */ "window", + /* 312 */ "frame_opt", + /* 313 */ "part_opt", + /* 314 */ "filter_clause", + /* 315 */ "over_clause", + /* 316 */ "range_or_rows", + /* 317 */ "frame_bound", + /* 318 */ "frame_bound_s", + /* 319 */ "frame_bound_e", + /* 320 */ "frame_exclude_opt", + /* 321 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -172823,351 +174413,363 @@ static const char *const yyRuleName[] = { /* 92 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", /* 93 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt", /* 94 */ "values ::= VALUES LP nexprlist RP", - /* 95 */ "values ::= values COMMA LP nexprlist RP", - /* 96 */ "distinct ::= DISTINCT", - /* 97 */ "distinct ::= ALL", - /* 98 */ "distinct ::=", - /* 99 */ "sclp ::=", - /* 100 */ "selcollist ::= sclp scanpt expr scanpt as", - /* 101 */ "selcollist ::= sclp scanpt STAR", - /* 102 */ "selcollist ::= sclp scanpt nm DOT STAR", - /* 103 */ "as ::= AS nm", - /* 104 */ "as ::=", - /* 105 */ "from ::=", - /* 106 */ "from ::= FROM seltablist", - /* 107 */ "stl_prefix ::= seltablist joinop", - /* 108 */ "stl_prefix ::=", - /* 109 */ "seltablist ::= stl_prefix nm dbnm as 
on_using", - /* 110 */ "seltablist ::= stl_prefix nm dbnm as indexed_by on_using", - /* 111 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using", - /* 112 */ "seltablist ::= stl_prefix LP select RP as on_using", - /* 113 */ "seltablist ::= stl_prefix LP seltablist RP as on_using", - /* 114 */ "dbnm ::=", - /* 115 */ "dbnm ::= DOT nm", - /* 116 */ "fullname ::= nm", - /* 117 */ "fullname ::= nm DOT nm", - /* 118 */ "xfullname ::= nm", - /* 119 */ "xfullname ::= nm DOT nm", - /* 120 */ "xfullname ::= nm DOT nm AS nm", - /* 121 */ "xfullname ::= nm AS nm", - /* 122 */ "joinop ::= COMMA|JOIN", - /* 123 */ "joinop ::= JOIN_KW JOIN", - /* 124 */ "joinop ::= JOIN_KW nm JOIN", - /* 125 */ "joinop ::= JOIN_KW nm nm JOIN", - /* 126 */ "on_using ::= ON expr", - /* 127 */ "on_using ::= USING LP idlist RP", - /* 128 */ "on_using ::=", - /* 129 */ "indexed_opt ::=", - /* 130 */ "indexed_by ::= INDEXED BY nm", - /* 131 */ "indexed_by ::= NOT INDEXED", - /* 132 */ "orderby_opt ::=", - /* 133 */ "orderby_opt ::= ORDER BY sortlist", - /* 134 */ "sortlist ::= sortlist COMMA expr sortorder nulls", - /* 135 */ "sortlist ::= expr sortorder nulls", - /* 136 */ "sortorder ::= ASC", - /* 137 */ "sortorder ::= DESC", - /* 138 */ "sortorder ::=", - /* 139 */ "nulls ::= NULLS FIRST", - /* 140 */ "nulls ::= NULLS LAST", - /* 141 */ "nulls ::=", - /* 142 */ "groupby_opt ::=", - /* 143 */ "groupby_opt ::= GROUP BY nexprlist", - /* 144 */ "having_opt ::=", - /* 145 */ "having_opt ::= HAVING expr", - /* 146 */ "limit_opt ::=", - /* 147 */ "limit_opt ::= LIMIT expr", - /* 148 */ "limit_opt ::= LIMIT expr OFFSET expr", - /* 149 */ "limit_opt ::= LIMIT expr COMMA expr", - /* 150 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret", - /* 151 */ "where_opt ::=", - /* 152 */ "where_opt ::= WHERE expr", - /* 153 */ "where_opt_ret ::=", - /* 154 */ "where_opt_ret ::= WHERE expr", - /* 155 */ "where_opt_ret ::= RETURNING selcollist", - /* 156 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", - /* 157 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret", - /* 158 */ "setlist ::= setlist COMMA nm EQ expr", - /* 159 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", - /* 160 */ "setlist ::= nm EQ expr", - /* 161 */ "setlist ::= LP idlist RP EQ expr", - /* 162 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", - /* 163 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", - /* 164 */ "upsert ::=", - /* 165 */ "upsert ::= RETURNING selcollist", - /* 166 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", - /* 167 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", - /* 168 */ "upsert ::= ON CONFLICT DO NOTHING returning", - /* 169 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", - /* 170 */ "returning ::= RETURNING selcollist", - /* 171 */ "insert_cmd ::= INSERT orconf", - /* 172 */ "insert_cmd ::= REPLACE", - /* 173 */ "idlist_opt ::=", - /* 174 */ "idlist_opt ::= LP idlist RP", - /* 175 */ "idlist ::= idlist COMMA nm", - /* 176 */ "idlist ::= nm", - /* 177 */ "expr ::= LP expr RP", - /* 178 */ "expr ::= ID|INDEXED|JOIN_KW", - /* 179 */ "expr ::= nm DOT nm", - /* 180 */ "expr ::= nm DOT nm DOT nm", - /* 181 */ "term ::= NULL|FLOAT|BLOB", - /* 182 */ "term ::= STRING", - /* 183 */ "term ::= INTEGER", - /* 184 */ "expr ::= VARIABLE", - /* 185 */ "expr ::= expr COLLATE ID|STRING", - /* 186 */ "expr ::= CAST LP expr AS typetoken RP", - /* 187 */ 
"expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", - /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", - /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", - /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", - /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", - /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", - /* 193 */ "term ::= CTIME_KW", - /* 194 */ "expr ::= LP nexprlist COMMA expr RP", - /* 195 */ "expr ::= expr AND expr", - /* 196 */ "expr ::= expr OR expr", - /* 197 */ "expr ::= expr LT|GT|GE|LE expr", - /* 198 */ "expr ::= expr EQ|NE expr", - /* 199 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 200 */ "expr ::= expr PLUS|MINUS expr", - /* 201 */ "expr ::= expr STAR|SLASH|REM expr", - /* 202 */ "expr ::= expr CONCAT expr", - /* 203 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 204 */ "expr ::= expr likeop expr", - /* 205 */ "expr ::= expr likeop expr ESCAPE expr", - /* 206 */ "expr ::= expr ISNULL|NOTNULL", - /* 207 */ "expr ::= expr NOT NULL", - /* 208 */ "expr ::= expr IS expr", - /* 209 */ "expr ::= expr IS NOT expr", - /* 210 */ "expr ::= expr IS NOT DISTINCT FROM expr", - /* 211 */ "expr ::= expr IS DISTINCT FROM expr", - /* 212 */ "expr ::= NOT expr", - /* 213 */ "expr ::= BITNOT expr", - /* 214 */ "expr ::= PLUS|MINUS expr", - /* 215 */ "expr ::= expr PTR expr", - /* 216 */ "between_op ::= BETWEEN", - /* 217 */ "between_op ::= NOT BETWEEN", - /* 218 */ "expr ::= expr between_op expr AND expr", - /* 219 */ "in_op ::= IN", - /* 220 */ "in_op ::= NOT IN", - /* 221 */ "expr ::= expr in_op LP exprlist RP", - /* 222 */ "expr ::= LP select RP", - /* 223 */ "expr ::= expr in_op LP select RP", - /* 224 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 225 */ "expr ::= EXISTS LP select RP", - /* 226 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 227 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 228 */ "case_exprlist ::= WHEN expr THEN expr", - /* 229 */ "case_else ::= ELSE expr", - /* 230 */ "case_else ::=", - /* 231 */ "case_operand ::=", - /* 232 */ "exprlist ::=", - /* 233 */ "nexprlist ::= nexprlist COMMA expr", - /* 234 */ "nexprlist ::= expr", - /* 235 */ "paren_exprlist ::=", - /* 236 */ "paren_exprlist ::= LP exprlist RP", - /* 237 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 238 */ "uniqueflag ::= UNIQUE", - /* 239 */ "uniqueflag ::=", - /* 240 */ "eidlist_opt ::=", - /* 241 */ "eidlist_opt ::= LP eidlist RP", - /* 242 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 243 */ "eidlist ::= nm collate sortorder", - /* 244 */ "collate ::=", - /* 245 */ "collate ::= COLLATE ID|STRING", - /* 246 */ "cmd ::= DROP INDEX ifexists fullname", - /* 247 */ "cmd ::= VACUUM vinto", - /* 248 */ "cmd ::= VACUUM nm vinto", - /* 249 */ "vinto ::= INTO expr", - /* 250 */ "vinto ::=", - /* 251 */ "cmd ::= PRAGMA nm dbnm", - /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 256 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 257 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 258 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 259 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 260 */ "trigger_time ::= BEFORE|AFTER", - /* 261 */ 
"trigger_time ::= INSTEAD OF", - /* 262 */ "trigger_time ::=", - /* 263 */ "trigger_event ::= DELETE|INSERT", - /* 264 */ "trigger_event ::= UPDATE", - /* 265 */ "trigger_event ::= UPDATE OF idlist", - /* 266 */ "when_clause ::=", - /* 267 */ "when_clause ::= WHEN expr", - /* 268 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 269 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 270 */ "trnm ::= nm DOT nm", - /* 271 */ "tridxby ::= INDEXED BY nm", - /* 272 */ "tridxby ::= NOT INDEXED", - /* 273 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", - /* 274 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", - /* 275 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", - /* 276 */ "trigger_cmd ::= scanpt select scanpt", - /* 277 */ "expr ::= RAISE LP IGNORE RP", - /* 278 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 279 */ "raisetype ::= ROLLBACK", - /* 280 */ "raisetype ::= ABORT", - /* 281 */ "raisetype ::= FAIL", - /* 282 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 283 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 284 */ "cmd ::= DETACH database_kw_opt expr", - /* 285 */ "key_opt ::=", - /* 286 */ "key_opt ::= KEY expr", - /* 287 */ "cmd ::= REINDEX", - /* 288 */ "cmd ::= REINDEX nm dbnm", - /* 289 */ "cmd ::= ANALYZE", - /* 290 */ "cmd ::= ANALYZE nm dbnm", - /* 291 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 292 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 293 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", - /* 294 */ "add_column_fullname ::= fullname", - /* 295 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", - /* 296 */ "cmd ::= create_vtab", - /* 297 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 298 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 299 */ "vtabarg ::=", - /* 300 */ "vtabargtoken ::= ANY", - /* 301 */ "vtabargtoken ::= lp anylist RP", - /* 302 */ "lp ::= LP", - /* 303 */ "with ::= WITH wqlist", - /* 304 */ "with ::= WITH RECURSIVE wqlist", - /* 305 */ "wqas ::= AS", - /* 306 */ "wqas ::= AS MATERIALIZED", - /* 307 */ "wqas ::= AS NOT MATERIALIZED", - /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP", - /* 309 */ "wqlist ::= wqitem", - /* 310 */ "wqlist ::= wqlist COMMA wqitem", - /* 311 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 312 */ "windowdefn ::= nm AS LP window RP", - /* 313 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 314 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 315 */ "window ::= ORDER BY sortlist frame_opt", - /* 316 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 317 */ "window ::= nm frame_opt", - /* 318 */ "frame_opt ::=", - /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 322 */ "frame_bound_s ::= frame_bound", - /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 324 */ "frame_bound_e ::= frame_bound", - /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 327 */ "frame_bound ::= CURRENT ROW", - /* 328 */ "frame_exclude_opt ::=", - /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 330 */ "frame_exclude ::= NO OTHERS", - /* 331 */ "frame_exclude ::= CURRENT ROW", - /* 332 */ "frame_exclude ::= 
GROUP|TIES", - /* 333 */ "window_clause ::= WINDOW windowdefn_list", - /* 334 */ "filter_over ::= filter_clause over_clause", - /* 335 */ "filter_over ::= over_clause", - /* 336 */ "filter_over ::= filter_clause", - /* 337 */ "over_clause ::= OVER LP window RP", - /* 338 */ "over_clause ::= OVER nm", - /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 340 */ "input ::= cmdlist", - /* 341 */ "cmdlist ::= cmdlist ecmd", - /* 342 */ "cmdlist ::= ecmd", - /* 343 */ "ecmd ::= SEMI", - /* 344 */ "ecmd ::= cmdx SEMI", - /* 345 */ "ecmd ::= explain cmdx SEMI", - /* 346 */ "trans_opt ::=", - /* 347 */ "trans_opt ::= TRANSACTION", - /* 348 */ "trans_opt ::= TRANSACTION nm", - /* 349 */ "savepoint_opt ::= SAVEPOINT", - /* 350 */ "savepoint_opt ::=", - /* 351 */ "cmd ::= create_table create_table_args", - /* 352 */ "table_option_set ::= table_option", - /* 353 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 354 */ "columnlist ::= columnname carglist", - /* 355 */ "nm ::= ID|INDEXED|JOIN_KW", - /* 356 */ "nm ::= STRING", - /* 357 */ "typetoken ::= typename", - /* 358 */ "typename ::= ID|STRING", - /* 359 */ "signed ::= plus_num", - /* 360 */ "signed ::= minus_num", - /* 361 */ "carglist ::= carglist ccons", - /* 362 */ "carglist ::=", - /* 363 */ "ccons ::= NULL onconf", - /* 364 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 365 */ "ccons ::= AS generated", - /* 366 */ "conslist_opt ::= COMMA conslist", - /* 367 */ "conslist ::= conslist tconscomma tcons", - /* 368 */ "conslist ::= tcons", - /* 369 */ "tconscomma ::=", - /* 370 */ "defer_subclause_opt ::= defer_subclause", - /* 371 */ "resolvetype ::= raisetype", - /* 372 */ "selectnowith ::= oneselect", - /* 373 */ "oneselect ::= values", - /* 374 */ "sclp ::= selcollist COMMA", - /* 375 */ "as ::= ID|STRING", - /* 376 */ "indexed_opt ::= indexed_by", - /* 377 */ "returning ::=", - /* 378 */ "expr ::= term", - /* 379 */ "likeop ::= LIKE_KW|MATCH", - /* 380 */ "case_operand ::= expr", - /* 381 */ "exprlist ::= nexprlist", - /* 382 */ "nmnum ::= plus_num", - /* 383 */ "nmnum ::= nm", - /* 384 */ "nmnum ::= ON", - /* 385 */ "nmnum ::= DELETE", - /* 386 */ "nmnum ::= DEFAULT", - /* 387 */ "plus_num ::= INTEGER|FLOAT", - /* 388 */ "foreach_clause ::=", - /* 389 */ "foreach_clause ::= FOR EACH ROW", - /* 390 */ "trnm ::= nm", - /* 391 */ "tridxby ::=", - /* 392 */ "database_kw_opt ::= DATABASE", - /* 393 */ "database_kw_opt ::=", - /* 394 */ "kwcolumn_opt ::=", - /* 395 */ "kwcolumn_opt ::= COLUMNKW", - /* 396 */ "vtabarglist ::= vtabarg", - /* 397 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 398 */ "vtabarg ::= vtabarg vtabargtoken", - /* 399 */ "anylist ::=", - /* 400 */ "anylist ::= anylist LP anylist RP", - /* 401 */ "anylist ::= anylist ANY", - /* 402 */ "with ::=", - /* 403 */ "windowdefn_list ::= windowdefn", - /* 404 */ "window ::= frame_opt", + /* 95 */ "oneselect ::= mvalues", + /* 96 */ "mvalues ::= values COMMA LP nexprlist RP", + /* 97 */ "mvalues ::= mvalues COMMA LP nexprlist RP", + /* 98 */ "distinct ::= DISTINCT", + /* 99 */ "distinct ::= ALL", + /* 100 */ "distinct ::=", + /* 101 */ "sclp ::=", + /* 102 */ "selcollist ::= sclp scanpt expr scanpt as", + /* 103 */ "selcollist ::= sclp scanpt STAR", + /* 104 */ "selcollist ::= sclp scanpt nm DOT STAR", + /* 105 */ "as ::= AS nm", + /* 106 */ "as ::=", + /* 107 */ "from ::=", + /* 108 */ "from ::= FROM seltablist", + /* 109 */ "stl_prefix ::= seltablist joinop", + /* 110 */ "stl_prefix ::=", + /* 111 */ "seltablist ::= stl_prefix nm dbnm as on_using", + /* 
112 */ "seltablist ::= stl_prefix nm dbnm as indexed_by on_using", + /* 113 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using", + /* 114 */ "seltablist ::= stl_prefix LP select RP as on_using", + /* 115 */ "seltablist ::= stl_prefix LP seltablist RP as on_using", + /* 116 */ "dbnm ::=", + /* 117 */ "dbnm ::= DOT nm", + /* 118 */ "fullname ::= nm", + /* 119 */ "fullname ::= nm DOT nm", + /* 120 */ "xfullname ::= nm", + /* 121 */ "xfullname ::= nm DOT nm", + /* 122 */ "xfullname ::= nm DOT nm AS nm", + /* 123 */ "xfullname ::= nm AS nm", + /* 124 */ "joinop ::= COMMA|JOIN", + /* 125 */ "joinop ::= JOIN_KW JOIN", + /* 126 */ "joinop ::= JOIN_KW nm JOIN", + /* 127 */ "joinop ::= JOIN_KW nm nm JOIN", + /* 128 */ "on_using ::= ON expr", + /* 129 */ "on_using ::= USING LP idlist RP", + /* 130 */ "on_using ::=", + /* 131 */ "indexed_opt ::=", + /* 132 */ "indexed_by ::= INDEXED BY nm", + /* 133 */ "indexed_by ::= NOT INDEXED", + /* 134 */ "orderby_opt ::=", + /* 135 */ "orderby_opt ::= ORDER BY sortlist", + /* 136 */ "sortlist ::= sortlist COMMA expr sortorder nulls", + /* 137 */ "sortlist ::= expr sortorder nulls", + /* 138 */ "sortorder ::= ASC", + /* 139 */ "sortorder ::= DESC", + /* 140 */ "sortorder ::=", + /* 141 */ "nulls ::= NULLS FIRST", + /* 142 */ "nulls ::= NULLS LAST", + /* 143 */ "nulls ::=", + /* 144 */ "groupby_opt ::=", + /* 145 */ "groupby_opt ::= GROUP BY nexprlist", + /* 146 */ "having_opt ::=", + /* 147 */ "having_opt ::= HAVING expr", + /* 148 */ "limit_opt ::=", + /* 149 */ "limit_opt ::= LIMIT expr", + /* 150 */ "limit_opt ::= LIMIT expr OFFSET expr", + /* 151 */ "limit_opt ::= LIMIT expr COMMA expr", + /* 152 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret", + /* 153 */ "where_opt ::=", + /* 154 */ "where_opt ::= WHERE expr", + /* 155 */ "where_opt_ret ::=", + /* 156 */ "where_opt_ret ::= WHERE expr", + /* 157 */ "where_opt_ret ::= RETURNING selcollist", + /* 158 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", + /* 159 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret", + /* 160 */ "setlist ::= setlist COMMA nm EQ expr", + /* 161 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", + /* 162 */ "setlist ::= nm EQ expr", + /* 163 */ "setlist ::= LP idlist RP EQ expr", + /* 164 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", + /* 165 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", + /* 166 */ "upsert ::=", + /* 167 */ "upsert ::= RETURNING selcollist", + /* 168 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", + /* 169 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", + /* 170 */ "upsert ::= ON CONFLICT DO NOTHING returning", + /* 171 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", + /* 172 */ "returning ::= RETURNING selcollist", + /* 173 */ "insert_cmd ::= INSERT orconf", + /* 174 */ "insert_cmd ::= REPLACE", + /* 175 */ "idlist_opt ::=", + /* 176 */ "idlist_opt ::= LP idlist RP", + /* 177 */ "idlist ::= idlist COMMA nm", + /* 178 */ "idlist ::= nm", + /* 179 */ "expr ::= LP expr RP", + /* 180 */ "expr ::= ID|INDEXED|JOIN_KW", + /* 181 */ "expr ::= nm DOT nm", + /* 182 */ "expr ::= nm DOT nm DOT nm", + /* 183 */ "term ::= NULL|FLOAT|BLOB", + /* 184 */ "term ::= STRING", + /* 185 */ "term ::= INTEGER", + /* 186 */ "expr ::= VARIABLE", + /* 187 */ "expr ::= expr COLLATE ID|STRING", + /* 188 */ "expr ::= CAST LP expr AS typetoken RP", + /* 189 */ "expr ::= 
ID|INDEXED|JOIN_KW LP distinct exprlist RP", + /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", + /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", + /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", + /* 193 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", + /* 194 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", + /* 195 */ "term ::= CTIME_KW", + /* 196 */ "expr ::= LP nexprlist COMMA expr RP", + /* 197 */ "expr ::= expr AND expr", + /* 198 */ "expr ::= expr OR expr", + /* 199 */ "expr ::= expr LT|GT|GE|LE expr", + /* 200 */ "expr ::= expr EQ|NE expr", + /* 201 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 202 */ "expr ::= expr PLUS|MINUS expr", + /* 203 */ "expr ::= expr STAR|SLASH|REM expr", + /* 204 */ "expr ::= expr CONCAT expr", + /* 205 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 206 */ "expr ::= expr likeop expr", + /* 207 */ "expr ::= expr likeop expr ESCAPE expr", + /* 208 */ "expr ::= expr ISNULL|NOTNULL", + /* 209 */ "expr ::= expr NOT NULL", + /* 210 */ "expr ::= expr IS expr", + /* 211 */ "expr ::= expr IS NOT expr", + /* 212 */ "expr ::= expr IS NOT DISTINCT FROM expr", + /* 213 */ "expr ::= expr IS DISTINCT FROM expr", + /* 214 */ "expr ::= NOT expr", + /* 215 */ "expr ::= BITNOT expr", + /* 216 */ "expr ::= PLUS|MINUS expr", + /* 217 */ "expr ::= expr PTR expr", + /* 218 */ "between_op ::= BETWEEN", + /* 219 */ "between_op ::= NOT BETWEEN", + /* 220 */ "expr ::= expr between_op expr AND expr", + /* 221 */ "in_op ::= IN", + /* 222 */ "in_op ::= NOT IN", + /* 223 */ "expr ::= expr in_op LP exprlist RP", + /* 224 */ "expr ::= LP select RP", + /* 225 */ "expr ::= expr in_op LP select RP", + /* 226 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 227 */ "expr ::= EXISTS LP select RP", + /* 228 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 229 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 230 */ "case_exprlist ::= WHEN expr THEN expr", + /* 231 */ "case_else ::= ELSE expr", + /* 232 */ "case_else ::=", + /* 233 */ "case_operand ::=", + /* 234 */ "exprlist ::=", + /* 235 */ "nexprlist ::= nexprlist COMMA expr", + /* 236 */ "nexprlist ::= expr", + /* 237 */ "paren_exprlist ::=", + /* 238 */ "paren_exprlist ::= LP exprlist RP", + /* 239 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 240 */ "uniqueflag ::= UNIQUE", + /* 241 */ "uniqueflag ::=", + /* 242 */ "eidlist_opt ::=", + /* 243 */ "eidlist_opt ::= LP eidlist RP", + /* 244 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 245 */ "eidlist ::= nm collate sortorder", + /* 246 */ "collate ::=", + /* 247 */ "collate ::= COLLATE ID|STRING", + /* 248 */ "cmd ::= DROP INDEX ifexists fullname", + /* 249 */ "cmd ::= VACUUM vinto", + /* 250 */ "cmd ::= VACUUM nm vinto", + /* 251 */ "vinto ::= INTO expr", + /* 252 */ "vinto ::=", + /* 253 */ "cmd ::= PRAGMA nm dbnm", + /* 254 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 255 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 256 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 257 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 258 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 259 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 260 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 261 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 262 */ "trigger_time ::= BEFORE|AFTER", + /* 263 */ 
"trigger_time ::= INSTEAD OF", + /* 264 */ "trigger_time ::=", + /* 265 */ "trigger_event ::= DELETE|INSERT", + /* 266 */ "trigger_event ::= UPDATE", + /* 267 */ "trigger_event ::= UPDATE OF idlist", + /* 268 */ "when_clause ::=", + /* 269 */ "when_clause ::= WHEN expr", + /* 270 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 271 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 272 */ "trnm ::= nm DOT nm", + /* 273 */ "tridxby ::= INDEXED BY nm", + /* 274 */ "tridxby ::= NOT INDEXED", + /* 275 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", + /* 276 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", + /* 277 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", + /* 278 */ "trigger_cmd ::= scanpt select scanpt", + /* 279 */ "expr ::= RAISE LP IGNORE RP", + /* 280 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 281 */ "raisetype ::= ROLLBACK", + /* 282 */ "raisetype ::= ABORT", + /* 283 */ "raisetype ::= FAIL", + /* 284 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 285 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 286 */ "cmd ::= DETACH database_kw_opt expr", + /* 287 */ "key_opt ::=", + /* 288 */ "key_opt ::= KEY expr", + /* 289 */ "cmd ::= REINDEX", + /* 290 */ "cmd ::= REINDEX nm dbnm", + /* 291 */ "cmd ::= ANALYZE", + /* 292 */ "cmd ::= ANALYZE nm dbnm", + /* 293 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 294 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 295 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", + /* 296 */ "add_column_fullname ::= fullname", + /* 297 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", + /* 298 */ "cmd ::= create_vtab", + /* 299 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 300 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 301 */ "vtabarg ::=", + /* 302 */ "vtabargtoken ::= ANY", + /* 303 */ "vtabargtoken ::= lp anylist RP", + /* 304 */ "lp ::= LP", + /* 305 */ "with ::= WITH wqlist", + /* 306 */ "with ::= WITH RECURSIVE wqlist", + /* 307 */ "wqas ::= AS", + /* 308 */ "wqas ::= AS MATERIALIZED", + /* 309 */ "wqas ::= AS NOT MATERIALIZED", + /* 310 */ "wqitem ::= withnm eidlist_opt wqas LP select RP", + /* 311 */ "withnm ::= nm", + /* 312 */ "wqlist ::= wqitem", + /* 313 */ "wqlist ::= wqlist COMMA wqitem", + /* 314 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 315 */ "windowdefn ::= nm AS LP window RP", + /* 316 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 317 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 318 */ "window ::= ORDER BY sortlist frame_opt", + /* 319 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 320 */ "window ::= nm frame_opt", + /* 321 */ "frame_opt ::=", + /* 322 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 323 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 324 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 325 */ "frame_bound_s ::= frame_bound", + /* 326 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 327 */ "frame_bound_e ::= frame_bound", + /* 328 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 329 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 330 */ "frame_bound ::= CURRENT ROW", + /* 331 */ "frame_exclude_opt ::=", + /* 332 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 333 */ "frame_exclude ::= NO OTHERS", + /* 334 */ "frame_exclude ::= CURRENT ROW", + /* 
335 */ "frame_exclude ::= GROUP|TIES", + /* 336 */ "window_clause ::= WINDOW windowdefn_list", + /* 337 */ "filter_over ::= filter_clause over_clause", + /* 338 */ "filter_over ::= over_clause", + /* 339 */ "filter_over ::= filter_clause", + /* 340 */ "over_clause ::= OVER LP window RP", + /* 341 */ "over_clause ::= OVER nm", + /* 342 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 343 */ "term ::= QNUMBER", + /* 344 */ "input ::= cmdlist", + /* 345 */ "cmdlist ::= cmdlist ecmd", + /* 346 */ "cmdlist ::= ecmd", + /* 347 */ "ecmd ::= SEMI", + /* 348 */ "ecmd ::= cmdx SEMI", + /* 349 */ "ecmd ::= explain cmdx SEMI", + /* 350 */ "trans_opt ::=", + /* 351 */ "trans_opt ::= TRANSACTION", + /* 352 */ "trans_opt ::= TRANSACTION nm", + /* 353 */ "savepoint_opt ::= SAVEPOINT", + /* 354 */ "savepoint_opt ::=", + /* 355 */ "cmd ::= create_table create_table_args", + /* 356 */ "table_option_set ::= table_option", + /* 357 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 358 */ "columnlist ::= columnname carglist", + /* 359 */ "nm ::= ID|INDEXED|JOIN_KW", + /* 360 */ "nm ::= STRING", + /* 361 */ "typetoken ::= typename", + /* 362 */ "typename ::= ID|STRING", + /* 363 */ "signed ::= plus_num", + /* 364 */ "signed ::= minus_num", + /* 365 */ "carglist ::= carglist ccons", + /* 366 */ "carglist ::=", + /* 367 */ "ccons ::= NULL onconf", + /* 368 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 369 */ "ccons ::= AS generated", + /* 370 */ "conslist_opt ::= COMMA conslist", + /* 371 */ "conslist ::= conslist tconscomma tcons", + /* 372 */ "conslist ::= tcons", + /* 373 */ "tconscomma ::=", + /* 374 */ "defer_subclause_opt ::= defer_subclause", + /* 375 */ "resolvetype ::= raisetype", + /* 376 */ "selectnowith ::= oneselect", + /* 377 */ "oneselect ::= values", + /* 378 */ "sclp ::= selcollist COMMA", + /* 379 */ "as ::= ID|STRING", + /* 380 */ "indexed_opt ::= indexed_by", + /* 381 */ "returning ::=", + /* 382 */ "expr ::= term", + /* 383 */ "likeop ::= LIKE_KW|MATCH", + /* 384 */ "case_operand ::= expr", + /* 385 */ "exprlist ::= nexprlist", + /* 386 */ "nmnum ::= plus_num", + /* 387 */ "nmnum ::= nm", + /* 388 */ "nmnum ::= ON", + /* 389 */ "nmnum ::= DELETE", + /* 390 */ "nmnum ::= DEFAULT", + /* 391 */ "plus_num ::= INTEGER|FLOAT", + /* 392 */ "foreach_clause ::=", + /* 393 */ "foreach_clause ::= FOR EACH ROW", + /* 394 */ "trnm ::= nm", + /* 395 */ "tridxby ::=", + /* 396 */ "database_kw_opt ::= DATABASE", + /* 397 */ "database_kw_opt ::=", + /* 398 */ "kwcolumn_opt ::=", + /* 399 */ "kwcolumn_opt ::= COLUMNKW", + /* 400 */ "vtabarglist ::= vtabarg", + /* 401 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 402 */ "vtabarg ::= vtabarg vtabargtoken", + /* 403 */ "anylist ::=", + /* 404 */ "anylist ::= anylist LP anylist RP", + /* 405 */ "anylist ::= anylist ANY", + /* 406 */ "with ::=", + /* 407 */ "windowdefn_list ::= windowdefn", + /* 408 */ "window ::= frame_opt", }; #endif /* NDEBUG */ -#if YYSTACKDEPTH<=0 +#if YYGROWABLESTACK /* ** Try to increase the size of the parser stack. Return the number ** of errors. Return 0 on success. */ static int yyGrowStack(yyParser *p){ + int oldSize = 1 + (int)(p->yystackEnd - p->yystack); int newSize; int idx; yyStackEntry *pNew; - newSize = p->yystksz*2 + 100; - idx = p->yytos ? 
(int)(p->yytos - p->yystack) : 0; - if( p->yystack==&p->yystk0 ){ - pNew = malloc(newSize*sizeof(pNew[0])); - if( pNew ) pNew[0] = p->yystk0; + newSize = oldSize*2 + 100; + idx = (int)(p->yytos - p->yystack); + if( p->yystack==p->yystk0 ){ + pNew = YYREALLOC(0, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + memcpy(pNew, p->yystack, oldSize*sizeof(pNew[0])); }else{ - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + pNew = YYREALLOC(p->yystack, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; } - if( pNew ){ - p->yystack = pNew; - p->yytos = &p->yystack[idx]; + p->yystack = pNew; + p->yytos = &p->yystack[idx]; #ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", - yyTracePrompt, p->yystksz, newSize); - } -#endif - p->yystksz = newSize; + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, oldSize, newSize); } - return pNew==0; +#endif + p->yystackEnd = &p->yystack[newSize-1]; + return 0; } +#endif /* YYGROWABLESTACK */ + +#if !YYGROWABLESTACK +/* For builds that do no have a growable stack, yyGrowStack always +** returns an error. +*/ +# define yyGrowStack(X) 1 #endif /* Datatype of the argument to the memory allocated passed as the @@ -173187,24 +174789,14 @@ SQLITE_PRIVATE void sqlite3ParserInit(void *yypRawParser sqlite3ParserCTX_PDECL) #ifdef YYTRACKMAXSTACKDEPTH yypParser->yyhwm = 0; #endif -#if YYSTACKDEPTH<=0 - yypParser->yytos = NULL; - yypParser->yystack = NULL; - yypParser->yystksz = 0; - if( yyGrowStack(yypParser) ){ - yypParser->yystack = &yypParser->yystk0; - yypParser->yystksz = 1; - } -#endif + yypParser->yystack = yypParser->yystk0; + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt = -1; #endif yypParser->yytos = yypParser->yystack; yypParser->yystack[0].stateno = 0; yypParser->yystack[0].major = 0; -#if YYSTACKDEPTH>0 - yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; -#endif } #ifndef sqlite3Parser_ENGINEALWAYSONSTACK @@ -173258,97 +174850,98 @@ static void yy_destructor( ** inside the C code. 
 */
 /********* Begin destructor definitions ***************************************/
-    case 204: /* select */
-    case 239: /* selectnowith */
-    case 240: /* oneselect */
-    case 252: /* values */
+    case 205: /* select */
+    case 240: /* selectnowith */
+    case 241: /* oneselect */
+    case 253: /* values */
+    case 255: /* mvalues */
 {
-sqlite3SelectDelete(pParse->db, (yypminor->yy47));
-}
-      break;
-    case 216: /* term */
-    case 217: /* expr */
-    case 246: /* where_opt */
-    case 248: /* having_opt */
-    case 267: /* where_opt_ret */
-    case 278: /* case_operand */
-    case 280: /* case_else */
-    case 283: /* vinto */
-    case 290: /* when_clause */
-    case 295: /* key_opt */
-    case 311: /* filter_clause */
+sqlite3SelectDelete(pParse->db, (yypminor->yy555));
+}
+      break;
+    case 217: /* term */
+    case 218: /* expr */
+    case 247: /* where_opt */
+    case 249: /* having_opt */
+    case 269: /* where_opt_ret */
+    case 280: /* case_operand */
+    case 282: /* case_else */
+    case 285: /* vinto */
+    case 292: /* when_clause */
+    case 297: /* key_opt */
+    case 314: /* filter_clause */
 {
-sqlite3ExprDelete(pParse->db, (yypminor->yy528));
-}
-      break;
-    case 221: /* eidlist_opt */
-    case 231: /* sortlist */
-    case 232: /* eidlist */
-    case 244: /* selcollist */
-    case 247: /* groupby_opt */
-    case 249: /* orderby_opt */
-    case 253: /* nexprlist */
-    case 254: /* sclp */
-    case 261: /* exprlist */
-    case 268: /* setlist */
-    case 277: /* paren_exprlist */
-    case 279: /* case_exprlist */
-    case 310: /* part_opt */
+sqlite3ExprDelete(pParse->db, (yypminor->yy454));
+}
+      break;
+    case 222: /* eidlist_opt */
+    case 232: /* sortlist */
+    case 233: /* eidlist */
+    case 245: /* selcollist */
+    case 248: /* groupby_opt */
+    case 250: /* orderby_opt */
+    case 254: /* nexprlist */
+    case 256: /* sclp */
+    case 263: /* exprlist */
+    case 270: /* setlist */
+    case 279: /* paren_exprlist */
+    case 281: /* case_exprlist */
+    case 313: /* part_opt */
 {
-sqlite3ExprListDelete(pParse->db, (yypminor->yy322));
+sqlite3ExprListDelete(pParse->db, (yypminor->yy14));
 }
       break;
-    case 238: /* fullname */
-    case 245: /* from */
-    case 256: /* seltablist */
-    case 257: /* stl_prefix */
-    case 262: /* xfullname */
+    case 239: /* fullname */
+    case 246: /* from */
+    case 258: /* seltablist */
+    case 259: /* stl_prefix */
+    case 264: /* xfullname */
 {
-sqlite3SrcListDelete(pParse->db, (yypminor->yy131));
+sqlite3SrcListDelete(pParse->db, (yypminor->yy203));
 }
       break;
-    case 241: /* wqlist */
+    case 242: /* wqlist */
 {
-sqlite3WithDelete(pParse->db, (yypminor->yy521));
+sqlite3WithDelete(pParse->db, (yypminor->yy59));
 }
       break;
-    case 251: /* window_clause */
-    case 306: /* windowdefn_list */
+    case 252: /* window_clause */
+    case 309: /* windowdefn_list */
 {
-sqlite3WindowListDelete(pParse->db, (yypminor->yy41));
+sqlite3WindowListDelete(pParse->db, (yypminor->yy211));
 }
       break;
-    case 263: /* idlist */
-    case 270: /* idlist_opt */
+    case 265: /* idlist */
+    case 272: /* idlist_opt */
 {
-sqlite3IdListDelete(pParse->db, (yypminor->yy254));
+sqlite3IdListDelete(pParse->db, (yypminor->yy132));
 }
       break;
-    case 273: /* filter_over */
-    case 307: /* windowdefn */
-    case 308: /* window */
-    case 309: /* frame_opt */
-    case 312: /* over_clause */
+    case 275: /* filter_over */
+    case 310: /* windowdefn */
+    case 311: /* window */
+    case 312: /* frame_opt */
+    case 315: /* over_clause */
 {
-sqlite3WindowDelete(pParse->db, (yypminor->yy41));
+sqlite3WindowDelete(pParse->db, (yypminor->yy211));
 }
       break;
-    case 286: /* trigger_cmd_list */
-    case 291: /* trigger_cmd */
+    case 288: /* trigger_cmd_list */
+    case 293: /* trigger_cmd */
 {
-sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy33));
+sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy427));
 }
       break;
-    case 288: /* trigger_event */
+    case 290: /* trigger_event */
 {
-sqlite3IdListDelete(pParse->db, (yypminor->yy180).b);
+sqlite3IdListDelete(pParse->db, (yypminor->yy286).b);
 }
       break;
-    case 314: /* frame_bound */
-    case 315: /* frame_bound_s */
-    case 316: /* frame_bound_e */
+    case 317: /* frame_bound */
+    case 318: /* frame_bound_s */
+    case 319: /* frame_bound_e */
 {
-sqlite3ExprDelete(pParse->db, (yypminor->yy595).pExpr);
+sqlite3ExprDelete(pParse->db, (yypminor->yy509).pExpr);
 }
       break;
 /********* End destructor definitions *****************************************/
@@ -173382,9 +174975,26 @@ static void yy_pop_parser_stack(yyParser *pParser){
 */
 SQLITE_PRIVATE void sqlite3ParserFinalize(void *p){
   yyParser *pParser = (yyParser*)p;
-  while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser);
-#if YYSTACKDEPTH<=0
-  if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack);
+
+  /* In-lined version of calling yy_pop_parser_stack() for each
+  ** element left in the stack */
+  yyStackEntry *yytos = pParser->yytos;
+  while( yytos>pParser->yystack ){
+#ifndef NDEBUG
+    if( yyTraceFILE ){
+      fprintf(yyTraceFILE,"%sPopping %s\n",
+        yyTracePrompt,
+        yyTokenName[yytos->major]);
+    }
+#endif
+    if( yytos->major>=YY_MIN_DSTRCTR ){
+      yy_destructor(pParser, yytos->major, &yytos->minor);
+    }
+    yytos--;
+  }
+
+#if YYGROWABLESTACK
+  if( pParser->yystack!=pParser->yystk0 ) YYFREE(pParser->yystack);
 #endif
 }

@@ -173567,7 +175177,7 @@ static void yyStackOverflow(yyParser *yypParser){
 ** stack every overflows */
 /******** Begin %stack_overflow code ******************************************/
-  sqlite3ErrorMsg(pParse, "parser stack overflow");
+  sqlite3OomFault(pParse->db);
 /******** End %stack_overflow code ********************************************/
   sqlite3ParserARG_STORE /* Suppress warning about unused %extra_argument var */
   sqlite3ParserCTX_STORE
@@ -173611,25 +175221,19 @@ static void yy_shift(
     assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
   }
 #endif
-#if YYSTACKDEPTH>0
-  if( yypParser->yytos>yypParser->yystackEnd ){
-    yypParser->yytos--;
-    yyStackOverflow(yypParser);
-    return;
-  }
-#else
-  if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
+  yytos = yypParser->yytos;
+  if( yytos>yypParser->yystackEnd ){
    if( yyGrowStack(yypParser) ){
      yypParser->yytos--;
      yyStackOverflow(yypParser);
      return;
    }
+    yytos = yypParser->yytos;
+    assert( yytos <= yypParser->yystackEnd );
  }
-#endif
  if( yyNewState > YY_MAX_SHIFT ){
    yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
  }
-  yytos = yypParser->yytos;
  yytos->stateno = yyNewState;
  yytos->major = yyMajor;
  yytos->minor.yy0 = yyMinor;
@@ -173639,411 +175243,415 @@
 /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side
 ** of that rule */
 static const YYCODETYPE yyRuleInfoLhs[] = {
- 189, /* (0) explain ::= EXPLAIN */
- 189, /* (1) explain ::= EXPLAIN QUERY PLAN */
- 188, /* (2) cmdx ::= cmd */
- 190, /* (3) cmd ::= BEGIN transtype trans_opt */
- 191, /* (4) transtype ::= */
- 191, /* (5) transtype ::= DEFERRED */
- 191, /* (6) transtype ::= IMMEDIATE */
- 191, /* (7) transtype ::= EXCLUSIVE */
- 190, /* (8) cmd ::= COMMIT|END trans_opt */
- 190, /* (9) cmd ::= ROLLBACK trans_opt */
- 190, /* (10) cmd ::= SAVEPOINT nm */
- 190, /* (11) cmd ::= RELEASE savepoint_opt nm */
- 190, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */
- 195, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */
- 197, /* (14) createkw ::= CREATE */
- 199, /* (15) ifnotexists ::= */
- 199, /* (16) ifnotexists ::= IF NOT EXISTS */
- 198, /* (17) temp ::= TEMP */
- 198, /* (18) temp ::= */
- 196, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */
- 196, /* (20) create_table_args ::= AS select */
- 203, /* (21) table_option_set ::= */
- 203, /* (22) table_option_set ::= table_option_set COMMA table_option */
- 205, /* (23) table_option ::= WITHOUT nm */
- 205, /* (24) table_option ::= nm */
- 206, /* (25) columnname ::= nm typetoken */
- 208, /* (26) typetoken ::= */
- 208, /* (27) typetoken ::= typename LP signed RP */
- 208, /* (28) typetoken ::= typename LP signed COMMA signed RP */
- 209, /* (29) typename ::= typename ID|STRING */
- 213, /* (30) scanpt ::= */
- 214, /* (31) scantok ::= */
- 215, /* (32) ccons ::= CONSTRAINT nm */
- 215, /* (33) ccons ::= DEFAULT scantok term */
- 215, /* (34) ccons ::= DEFAULT LP expr RP */
- 215, /* (35) ccons ::= DEFAULT PLUS scantok term */
- 215, /* (36) ccons ::= DEFAULT MINUS scantok term */
- 215, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */
- 215, /* (38) ccons ::= NOT NULL onconf */
- 215, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */
- 215, /* (40) ccons ::= UNIQUE onconf */
- 215, /* (41) ccons ::= CHECK LP expr RP */
- 215, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */
- 215, /* (43) ccons ::= defer_subclause */
- 215, /* (44) ccons ::= COLLATE ID|STRING */
- 224, /* (45) generated ::= LP expr RP */
- 224, /* (46) generated ::= LP expr RP ID */
- 220, /* (47) autoinc ::= */
- 220, /* (48) autoinc ::= AUTOINCR */
- 222, /* (49) refargs ::= */
- 222, /* (50) refargs ::= refargs refarg */
- 225, /* (51) refarg ::= MATCH nm */
- 225, /* (52) refarg ::= ON INSERT refact */
- 225, /* (53) refarg ::= ON DELETE refact */
- 225, /* (54) refarg ::= ON UPDATE refact */
- 226, /* (55) refact ::= SET NULL */
- 226, /* (56) refact ::= SET DEFAULT */
- 226, /* (57) refact ::= CASCADE */
- 226, /* (58) refact ::= RESTRICT */
- 226, /* (59) refact ::= NO ACTION */
- 223, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
- 223, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
- 227, /* (62) init_deferred_pred_opt ::= */
- 227, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */
- 227, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
- 202, /* (65) conslist_opt ::= */
- 229, /* (66) tconscomma ::= COMMA */
- 230, /* (67) tcons ::= CONSTRAINT nm */
- 230, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
- 230, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */
- 230, /* (70) tcons ::= CHECK LP expr RP onconf */
- 230, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
- 233, /* (72) defer_subclause_opt ::= */
- 218, /* (73) onconf ::= */
- 218, /* (74) onconf ::= ON CONFLICT resolvetype */
- 234, /* (75) orconf ::= */
- 234, /* (76) orconf ::= OR resolvetype */
- 235, /* (77) resolvetype ::= IGNORE */
- 235, /* (78) resolvetype ::= REPLACE */
- 190, /* (79) cmd ::= DROP TABLE ifexists fullname */
- 237, /* (80) ifexists ::= IF EXISTS */
- 237, /* (81) ifexists ::= */
- 190, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
- 190, /* (83) cmd ::= DROP VIEW ifexists fullname */
- 190, /* (84) cmd ::= select */
- 204, /* (85) select ::= WITH wqlist selectnowith */
- 204, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */
- 204, /* (87) select ::= selectnowith */
- 239, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */
- 242, /* (89) multiselect_op ::= UNION */
- 242, /* (90) multiselect_op ::= UNION ALL */
- 242, /* (91) multiselect_op ::= EXCEPT|INTERSECT */
- 240, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
- 240, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */
- 252, /* (94) values ::= VALUES LP nexprlist RP */
- 252, /* (95) values ::= values COMMA LP nexprlist RP */
- 243, /* (96) distinct ::= DISTINCT */
- 243, /* (97) distinct ::= ALL */
- 243, /* (98) distinct ::= */
- 254, /* (99) sclp ::= */
- 244, /* (100) selcollist ::= sclp scanpt expr scanpt as */
- 244, /* (101) selcollist ::= sclp scanpt STAR */
- 244, /* (102) selcollist ::= sclp scanpt nm DOT STAR */
- 255, /* (103) as ::= AS nm */
- 255, /* (104) as ::= */
- 245, /* (105) from ::= */
- 245, /* (106) from ::= FROM seltablist */
- 257, /* (107) stl_prefix ::= seltablist joinop */
- 257, /* (108) stl_prefix ::= */
- 256, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */
- 256, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */
- 256, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */
- 256, /* (112) seltablist ::= stl_prefix LP select RP as on_using */
- 256, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */
- 200, /* (114) dbnm ::= */
- 200, /* (115) dbnm ::= DOT nm */
- 238, /* (116) fullname ::= nm */
- 238, /* (117) fullname ::= nm DOT nm */
- 262, /* (118) xfullname ::= nm */
- 262, /* (119) xfullname ::= nm DOT nm */
- 262, /* (120) xfullname ::= nm DOT nm AS nm */
- 262, /* (121) xfullname ::= nm AS nm */
- 258, /* (122) joinop ::= COMMA|JOIN */
- 258, /* (123) joinop ::= JOIN_KW JOIN */
- 258, /* (124) joinop ::= JOIN_KW nm JOIN */
- 258, /* (125) joinop ::= JOIN_KW nm nm JOIN */
- 259, /* (126) on_using ::= ON expr */
- 259, /* (127) on_using ::= USING LP idlist RP */
- 259, /* (128) on_using ::= */
- 264, /* (129) indexed_opt ::= */
- 260, /* (130) indexed_by ::= INDEXED BY nm */
- 260, /* (131) indexed_by ::= NOT INDEXED */
- 249, /* (132) orderby_opt ::= */
- 249, /* (133) orderby_opt ::= ORDER BY sortlist */
- 231, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */
- 231, /* (135) sortlist ::= expr sortorder nulls */
- 219, /* (136) sortorder ::= ASC */
- 219, /* (137) sortorder ::= DESC */
- 219, /* (138) sortorder ::= */
- 265, /* (139) nulls ::= NULLS FIRST */
- 265, /* (140) nulls ::= NULLS LAST */
- 265, /* (141) nulls ::= */
- 247, /* (142) groupby_opt ::= */
- 247, /* (143) groupby_opt ::= GROUP BY nexprlist */
- 248, /* (144) having_opt ::= */
- 248, /* (145) having_opt ::= HAVING expr */
- 250, /* (146) limit_opt ::= */
- 250, /* (147) limit_opt ::= LIMIT expr */
- 250, /* (148) limit_opt ::= LIMIT expr OFFSET expr */
- 250, /* (149) limit_opt ::= LIMIT expr COMMA expr */
- 190, /* (150) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */
- 246, /* (151) where_opt ::= */
- 246, /* (152) where_opt ::= WHERE expr */
- 267, /* (153) where_opt_ret ::= */
- 267, /* (154) where_opt_ret ::= WHERE expr */
- 267, /* (155) where_opt_ret ::= RETURNING selcollist */
- 267, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */
- 190, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */
- 268, /* (158) setlist ::= setlist COMMA nm EQ expr */
- 268, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */
- 268, /* (160) setlist ::= nm EQ expr */
- 268, /* (161) setlist ::= LP idlist RP EQ expr */
- 190, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
- 190, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */
- 271, /* (164) upsert ::= */
- 271, /* (165) upsert ::= RETURNING selcollist */
- 271, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */
- 271, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */
- 271, /* (168) upsert ::= ON CONFLICT DO NOTHING returning */
- 271, /* (169) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */
- 272, /* (170) returning ::= RETURNING selcollist */
- 269, /* (171) insert_cmd ::= INSERT orconf */
- 269, /* (172) insert_cmd ::= REPLACE */
- 270, /* (173) idlist_opt ::= */
- 270, /* (174) idlist_opt ::= LP idlist RP */
- 263, /* (175) idlist ::= idlist COMMA nm */
- 263, /* (176) idlist ::= nm */
- 217, /* (177) expr ::= LP expr RP */
- 217, /* (178) expr ::= ID|INDEXED|JOIN_KW */
- 217, /* (179) expr ::= nm DOT nm */
- 217, /* (180) expr ::= nm DOT nm DOT nm */
- 216, /* (181) term ::= NULL|FLOAT|BLOB */
- 216, /* (182) term ::= STRING */
- 216, /* (183) term ::= INTEGER */
- 217, /* (184) expr ::= VARIABLE */
- 217, /* (185) expr ::= expr COLLATE ID|STRING */
- 217, /* (186) expr ::= CAST LP expr AS typetoken RP */
- 217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
- 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
- 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
- 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
- 217, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
- 217, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
- 216, /* (193) term ::= CTIME_KW */
- 217, /* (194) expr ::= LP nexprlist COMMA expr RP */
- 217, /* (195) expr ::= expr AND expr */
- 217, /* (196) expr ::= expr OR expr */
- 217, /* (197) expr ::= expr LT|GT|GE|LE expr */
- 217, /* (198) expr ::= expr EQ|NE expr */
- 217, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
- 217, /* (200) expr ::= expr PLUS|MINUS expr */
- 217, /* (201) expr ::= expr STAR|SLASH|REM expr */
- 217, /* (202) expr ::= expr CONCAT expr */
- 274, /* (203) likeop ::= NOT LIKE_KW|MATCH */
- 217, /* (204) expr ::= expr likeop expr */
- 217, /* (205) expr ::= expr likeop expr ESCAPE expr */
- 217, /* (206) expr ::= expr ISNULL|NOTNULL */
- 217, /* (207) expr ::= expr NOT NULL */
- 217, /* (208) expr ::= expr IS expr */
- 217, /* (209) expr ::= expr IS NOT expr */
- 217, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */
- 217, /* (211) expr ::= expr IS DISTINCT FROM expr */
- 217, /* (212) expr ::= NOT expr */
- 217, /* (213) expr ::= BITNOT expr */
- 217, /* (214) expr ::= PLUS|MINUS expr */
- 217, /* (215) expr ::= expr PTR expr */
- 275, /* (216) between_op ::= BETWEEN */
- 275, /* (217) between_op ::= NOT BETWEEN */
- 217, /* (218) expr ::= expr between_op expr AND expr */
- 276, /* (219) in_op ::= IN */
- 276, /* (220) in_op ::= NOT IN */
- 217, /* (221) expr ::= expr in_op LP exprlist RP */
- 217, /* (222) expr ::= LP select RP */
- 217, /* (223) expr ::= expr in_op LP select RP */
- 217, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */
- 217, /* (225) expr ::= EXISTS LP select RP */
- 217, /* (226) expr ::= CASE case_operand case_exprlist case_else END */
- 279, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- 279, /* (228) case_exprlist ::= WHEN expr THEN expr */
- 280, /* (229) case_else ::= ELSE expr */
- 280, /* (230) case_else ::= */
- 278, /* (231) case_operand ::= */
- 261, /* (232) exprlist ::= */
- 253, /* (233) nexprlist ::= nexprlist COMMA expr */
- 253, /* (234) nexprlist ::= expr */
- 277, /* (235) paren_exprlist ::= */
- 277, /* (236) paren_exprlist ::= LP exprlist RP */
- 190, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- 281, /* (238) uniqueflag ::= UNIQUE */
- 281, /* (239) uniqueflag ::= */
- 221, /* (240) eidlist_opt ::= */
- 221, /* (241) eidlist_opt ::= LP eidlist RP */
- 232, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */
- 232, /* (243) eidlist ::= nm collate sortorder */
- 282, /* (244) collate ::= */
- 282, /* (245) collate ::= COLLATE ID|STRING */
- 190, /* (246) cmd ::= DROP INDEX ifexists fullname */
- 190, /* (247) cmd ::= VACUUM vinto */
- 190, /* (248) cmd ::= VACUUM nm vinto */
- 283, /* (249) vinto ::= INTO expr */
- 283, /* (250) vinto ::= */
- 190, /* (251) cmd ::= PRAGMA nm dbnm */
- 190, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */
- 190, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- 190, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */
- 190, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- 211, /* (256) plus_num ::= PLUS INTEGER|FLOAT */
- 212, /* (257) minus_num ::= MINUS INTEGER|FLOAT */
- 190, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- 285, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- 287, /* (260) trigger_time ::= BEFORE|AFTER */
- 287, /* (261) trigger_time ::= INSTEAD OF */
- 287, /* (262) trigger_time ::= */
- 288, /* (263) trigger_event ::= DELETE|INSERT */
- 288, /* (264) trigger_event ::= UPDATE */
- 288, /* (265) trigger_event ::= UPDATE OF idlist */
- 290, /* (266) when_clause ::= */
- 290, /* (267) when_clause ::= WHEN expr */
- 286, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- 286, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */
- 292, /* (270) trnm ::= nm DOT nm */
- 293, /* (271) tridxby ::= INDEXED BY nm */
- 293, /* (272) tridxby ::= NOT INDEXED */
- 291, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
- 291, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- 291, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- 291, /* (276) trigger_cmd ::= scanpt select scanpt */
- 217, /* (277) expr ::= RAISE LP IGNORE RP */
- 217, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */
- 236, /* (279) raisetype ::= ROLLBACK */
- 236, /* (280) raisetype ::= ABORT */
- 236, /* (281) raisetype ::= FAIL */
- 190, /* (282) cmd ::= DROP TRIGGER ifexists fullname */
- 190, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- 190, /* (284) cmd ::= DETACH database_kw_opt expr */
- 295, /* (285) key_opt ::= */
- 295, /* (286) key_opt ::= KEY expr */
- 190, /* (287) cmd ::= REINDEX */
- 190, /* (288) cmd ::= REINDEX nm dbnm */
- 190, /* (289) cmd ::= ANALYZE */
- 190, /* (290) cmd ::= ANALYZE nm dbnm */
- 190, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */
- 190, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- 190, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
- 296, /* (294) add_column_fullname ::= fullname */
- 190, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- 190, /* (296) cmd ::= create_vtab */
- 190, /* (297) cmd ::= create_vtab LP vtabarglist RP */
- 298, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- 300, /* (299) vtabarg ::= */
- 301, /* (300) vtabargtoken ::= ANY */
- 301, /* (301) vtabargtoken ::= lp anylist RP */
- 302, /* (302) lp ::= LP */
- 266, /* (303) with ::= WITH wqlist */
- 266, /* (304) with ::= WITH RECURSIVE wqlist */
- 305, /* (305) wqas ::= AS */
- 305, /* (306) wqas ::= AS MATERIALIZED */
- 305, /* (307) wqas ::= AS NOT MATERIALIZED */
- 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */
- 241, /* (309) wqlist ::= wqitem */
- 241, /* (310) wqlist ::= wqlist COMMA wqitem */
- 306, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- 307, /* (312) windowdefn ::= nm AS LP window RP */
- 308, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (315) window ::= ORDER BY sortlist frame_opt */
- 308, /* (316) window ::= nm ORDER BY sortlist frame_opt */
- 308, /* (317) window ::= nm frame_opt */
- 309, /* (318) frame_opt ::= */
- 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
- 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
- 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */
- 315, /* (322) frame_bound_s ::= frame_bound */
- 315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */
- 316, /* (324) frame_bound_e ::= frame_bound */
- 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */
- 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */
- 314, /* (327) frame_bound ::= CURRENT ROW */
- 317, /* (328) frame_exclude_opt ::= */
- 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */
- 318, /* (330) frame_exclude ::= NO OTHERS */
- 318, /* (331) frame_exclude ::= CURRENT ROW */
- 318, /* (332) frame_exclude ::= GROUP|TIES */
- 251, /* (333) window_clause ::= WINDOW windowdefn_list */
- 273, /* (334) filter_over ::= filter_clause over_clause */
- 273, /* (335) filter_over ::= over_clause */
- 273, /* (336) filter_over ::= filter_clause */
- 312, /* (337) over_clause ::= OVER LP window RP */
- 312, /* (338) over_clause ::= OVER nm */
- 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */
- 185, /* (340) input ::= cmdlist */
- 186, /* (341) cmdlist ::= cmdlist ecmd */
- 186, /* (342) cmdlist ::= ecmd */
- 187, /* (343) ecmd ::= SEMI */
- 187, /* (344) ecmd ::= cmdx SEMI */
- 187, /* (345) ecmd ::= explain cmdx SEMI */
- 192, /* (346) trans_opt ::= */
- 192, /* (347) trans_opt ::= TRANSACTION */
- 192, /* (348) trans_opt ::= TRANSACTION nm */
- 194, /* (349) savepoint_opt ::= SAVEPOINT */
- 194, /* (350) savepoint_opt ::= */
- 190, /* (351) cmd ::= create_table create_table_args */
- 203, /* (352) table_option_set ::= table_option */
- 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */
- 201, /* (354) columnlist ::= columnname carglist */
- 193, /* (355) nm ::= ID|INDEXED|JOIN_KW */
- 193, /* (356) nm ::= STRING */
- 208, /* (357) typetoken ::= typename */
- 209, /* (358) typename ::= ID|STRING */
- 210, /* (359) signed ::= plus_num */
- 210, /* (360) signed ::= minus_num */
- 207, /* (361) carglist ::= carglist ccons */
- 207, /* (362) carglist ::= */
- 215, /* (363) ccons ::= NULL onconf */
- 215, /* (364) ccons ::= GENERATED ALWAYS AS generated */
- 215, /* (365) ccons ::= AS generated */
- 202, /* (366) conslist_opt ::= COMMA conslist */
- 228, /* (367) conslist ::= conslist tconscomma tcons */
- 228, /* (368) conslist ::= tcons */
- 229, /* (369) tconscomma ::= */
- 233, /* (370) defer_subclause_opt ::= defer_subclause */
- 235, /* (371) resolvetype ::= raisetype */
- 239, /* (372) selectnowith ::= oneselect */
- 240, /* (373) oneselect ::= values */
- 254, /* (374) sclp ::= selcollist COMMA */
- 255, /* (375) as ::= ID|STRING */
- 264, /* (376) indexed_opt ::= indexed_by */
- 272, /* (377) returning ::= */
- 217, /* (378) expr ::= term */
- 274, /* (379) likeop ::= LIKE_KW|MATCH */
- 278, /* (380) case_operand ::= expr */
- 261, /* (381) exprlist ::= nexprlist */
- 284, /* (382) nmnum ::= plus_num */
- 284, /* (383) nmnum ::= nm */
- 284, /* (384) nmnum ::= ON */
- 284, /* (385) nmnum ::= DELETE */
- 284, /* (386) nmnum ::= DEFAULT */
- 211, /* (387) plus_num ::= INTEGER|FLOAT */
- 289, /* (388) foreach_clause ::= */
- 289, /* (389) foreach_clause ::= FOR EACH ROW */
- 292, /* (390) trnm ::= nm */
- 293, /* (391) tridxby ::= */
- 294, /* (392) database_kw_opt ::= DATABASE */
- 294, /* (393) database_kw_opt ::= */
- 297, /* (394) kwcolumn_opt ::= */
- 297, /* (395) kwcolumn_opt ::= COLUMNKW */
- 299, /* (396) vtabarglist ::= vtabarg */
- 299, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */
- 300, /* (398) vtabarg ::= vtabarg vtabargtoken */
- 303, /* (399) anylist ::= */
- 303, /* (400) anylist ::= anylist LP anylist RP */
- 303, /* (401) anylist ::= anylist ANY */
- 266, /* (402) with ::= */
- 306, /* (403) windowdefn_list ::= windowdefn */
- 308, /* (404) window ::= frame_opt */
+ 190, /* (0) explain ::= EXPLAIN */
+ 190, /* (1) explain ::= EXPLAIN QUERY PLAN */
+ 189, /* (2) cmdx ::= cmd */
+ 191, /* (3) cmd ::= BEGIN transtype trans_opt */
+ 192, /* (4) transtype ::= */
+ 192, /* (5) transtype ::= DEFERRED */
+ 192, /* (6) transtype ::= IMMEDIATE */
+ 192, /* (7) transtype ::= EXCLUSIVE */
+ 191, /* (8) cmd ::= COMMIT|END trans_opt */
+ 191, /* (9) cmd ::= ROLLBACK trans_opt */
+ 191, /* (10) cmd ::= SAVEPOINT nm */
+ 191, /* (11) cmd ::= RELEASE savepoint_opt nm */
+ 191, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */
+ 196, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */
+ 198, /* (14) createkw ::= CREATE */
+ 200, /* (15) ifnotexists ::= */
+ 200, /* (16) ifnotexists ::= IF NOT EXISTS */
+ 199, /* (17) temp ::= TEMP */
+ 199, /* (18) temp ::= */
+ 197, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */
+ 197, /* (20) create_table_args ::= AS select */
+ 204, /* (21) table_option_set ::= */
+ 204, /* (22) table_option_set ::= table_option_set COMMA table_option */
+ 206, /* (23) table_option ::= WITHOUT nm */
+ 206, /* (24) table_option ::= nm */
+ 207, /* (25) columnname ::= nm typetoken */
+ 209, /* (26) typetoken ::= */
+ 209, /* (27) typetoken ::= typename LP signed RP */
+ 209, /* (28) typetoken ::= typename LP signed COMMA signed RP */
+ 210, /* (29) typename ::= typename ID|STRING */
+ 214, /* (30) scanpt ::= */
+ 215, /* (31) scantok ::= */
+ 216, /* (32) ccons ::= CONSTRAINT nm */
+ 216, /* (33) ccons ::= DEFAULT scantok term */
+ 216, /* (34) ccons ::= DEFAULT LP expr RP */
+ 216, /* (35) ccons ::= DEFAULT PLUS scantok term */
+ 216, /* (36) ccons ::= DEFAULT MINUS scantok term */
+ 216, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */
+ 216, /* (38) ccons ::= NOT NULL onconf */
+ 216, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */
+ 216, /* (40) ccons ::= UNIQUE onconf */
+ 216, /* (41) ccons ::= CHECK LP expr RP */
+ 216, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */
+ 216, /* (43) ccons ::= defer_subclause */
+ 216, /* (44) ccons ::= COLLATE ID|STRING */
+ 225, /* (45) generated ::= LP expr RP */
+ 225, /* (46) generated ::= LP expr RP ID */
+ 221, /* (47) autoinc ::= */
+ 221, /* (48) autoinc ::= AUTOINCR */
+ 223, /* (49) refargs ::= */
+ 223, /* (50) refargs ::= refargs refarg */
+ 226, /* (51) refarg ::= MATCH nm */
+ 226, /* (52) refarg ::= ON INSERT refact */
+ 226, /* (53) refarg ::= ON DELETE refact */
+ 226, /* (54) refarg ::= ON UPDATE refact */
+ 227, /* (55) refact ::= SET NULL */
+ 227, /* (56) refact ::= SET DEFAULT */
+ 227, /* (57) refact ::= CASCADE */
+ 227, /* (58) refact ::= RESTRICT */
+ 227, /* (59) refact ::= NO ACTION */
+ 224, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
+ 224, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
+ 228, /* (62) init_deferred_pred_opt ::= */
+ 228, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */
+ 228, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
+ 203, /* (65) conslist_opt ::= */
+ 230, /* (66) tconscomma ::= COMMA */
+ 231, /* (67) tcons ::= CONSTRAINT nm */
+ 231, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
+ 231, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */
+ 231, /* (70) tcons ::= CHECK LP expr RP onconf */
+ 231, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
+ 234, /* (72) defer_subclause_opt ::= */
+ 219, /* (73) onconf ::= */
+ 219, /* (74) onconf ::= ON CONFLICT resolvetype */
+ 235, /* (75) orconf ::= */
+ 235, /* (76) orconf ::= OR resolvetype */
+ 236, /* (77) resolvetype ::= IGNORE */
+ 236, /* (78) resolvetype ::= REPLACE */
+ 191, /* (79) cmd ::= DROP TABLE ifexists fullname */
+ 238, /* (80) ifexists ::= IF EXISTS */
+ 238, /* (81) ifexists ::= */
+ 191, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
+ 191, /* (83) cmd ::= DROP VIEW ifexists fullname */
+ 191, /* (84) cmd ::= select */
+ 205, /* (85) select ::= WITH wqlist selectnowith */
+ 205, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */
+ 205, /* (87) select ::= selectnowith */
+ 240, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */
+ 243, /* (89) multiselect_op ::= UNION */
+ 243, /* (90) multiselect_op ::= UNION ALL */
+ 243, /* (91) multiselect_op ::= EXCEPT|INTERSECT */
+ 241, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
+ 241, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */
+ 253, /* (94) values ::= VALUES LP nexprlist RP */
+ 241, /* (95) oneselect ::= mvalues */
+ 255, /* (96) mvalues ::= values COMMA LP nexprlist RP */
+ 255, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */
+ 244, /* (98) distinct ::= DISTINCT */
+ 244, /* (99) distinct ::= ALL */
+ 244, /* (100) distinct ::= */
+ 256, /* (101) sclp ::= */
+ 245, /* (102) selcollist ::= sclp scanpt expr scanpt as */
+ 245, /* (103) selcollist ::= sclp scanpt STAR */
+ 245, /* (104) selcollist ::= sclp scanpt nm DOT STAR */
+ 257, /* (105) as ::= AS nm */
+ 257, /* (106) as ::= */
+ 246, /* (107) from ::= */
+ 246, /* (108) from ::= FROM seltablist */
+ 259, /* (109) stl_prefix ::= seltablist joinop */
+ 259, /* (110) stl_prefix ::= */
+ 258, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */
+ 258, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */
+ 258, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */
+ 258, /* (114) seltablist ::= stl_prefix LP select RP as on_using */
+ 258, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */
+ 201, /* (116) dbnm ::= */
+ 201, /* (117) dbnm ::= DOT nm */
+ 239, /* (118) fullname ::= nm */
+ 239, /* (119) fullname ::= nm DOT nm */
+ 264, /* (120) xfullname ::= nm */
+ 264, /* (121) xfullname ::= nm DOT nm */
+ 264, /* (122) xfullname ::= nm DOT nm AS nm */
+ 264, /* (123) xfullname ::= nm AS nm */
+ 260, /* (124) joinop ::= COMMA|JOIN */
+ 260, /* (125) joinop ::= JOIN_KW JOIN */
+ 260, /* (126) joinop ::= JOIN_KW nm JOIN */
+ 260, /* (127) joinop ::= JOIN_KW nm nm JOIN */
+ 261, /* (128) on_using ::= ON expr */
+ 261, /* (129) on_using ::= USING LP idlist RP */
+ 261, /* (130) on_using ::= */
+ 266, /* (131) indexed_opt ::= */
+ 262, /* (132) indexed_by ::= INDEXED BY nm */
+ 262, /* (133) indexed_by ::= NOT INDEXED */
+ 250, /* (134) orderby_opt ::= */
+ 250, /* (135) orderby_opt ::= ORDER BY sortlist */
+ 232, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */
+ 232, /* (137) sortlist ::= expr sortorder nulls */
+ 220, /* (138) sortorder ::= ASC */
+ 220, /* (139) sortorder ::= DESC */
+ 220, /* (140) sortorder ::= */
+ 267, /* (141) nulls ::= NULLS FIRST */
+ 267, /* (142) nulls ::= NULLS LAST */
+ 267, /* (143) nulls ::= */
+ 248, /* (144) groupby_opt ::= */
+ 248, /* (145) groupby_opt ::= GROUP BY nexprlist */
+ 249, /* (146) having_opt ::= */
+ 249, /* (147) having_opt ::= HAVING expr */
+ 251, /* (148) limit_opt ::= */
+ 251, /* (149) limit_opt ::= LIMIT expr */
+ 251, /* (150) limit_opt ::= LIMIT expr OFFSET expr */
+ 251, /* (151) limit_opt ::= LIMIT expr COMMA expr */
+ 191, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */
+ 247, /* (153) where_opt ::= */
+ 247, /* (154) where_opt ::= WHERE expr */
+ 269, /* (155) where_opt_ret ::= */
+ 269, /* (156) where_opt_ret ::= WHERE expr */
+ 269, /* (157) where_opt_ret ::= RETURNING selcollist */
+ 269, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */
+ 191, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */
+ 270, /* (160) setlist ::= setlist COMMA nm EQ expr */
+ 270, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */
+ 270, /* (162) setlist ::= nm EQ expr */
+ 270, /* (163) setlist ::= LP idlist RP EQ expr */
+ 191, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
+ 191, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */
+ 273, /* (166) upsert ::= */
+ 273, /* (167) upsert ::= RETURNING selcollist */
+ 273, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */
+ 273, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */
+ 273, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */
+ 273, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */
+ 274, /* (172) returning ::= RETURNING selcollist */
+ 271, /* (173) insert_cmd ::= INSERT orconf */
+ 271, /* (174) insert_cmd ::= REPLACE */
+ 272, /* (175) idlist_opt ::= */
+ 272, /* (176) idlist_opt ::= LP idlist RP */
+ 265, /* (177) idlist ::= idlist COMMA nm */
+ 265, /* (178) idlist ::= nm */
+ 218, /* (179) expr ::= LP expr RP */
+ 218, /* (180) expr ::= ID|INDEXED|JOIN_KW */
+ 218, /* (181) expr ::= nm DOT nm */
+ 218, /* (182) expr ::= nm DOT nm DOT nm */
+ 217, /* (183) term ::= NULL|FLOAT|BLOB */
+ 217, /* (184) term ::= STRING */
+ 217, /* (185) term ::= INTEGER */
+ 218, /* (186) expr ::= VARIABLE */
+ 218, /* (187) expr ::= expr COLLATE ID|STRING */
+ 218, /* (188) expr ::= CAST LP expr AS typetoken RP */
+ 218, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
+ 218, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+ 218, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+ 218, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+ 218, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+ 218, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+ 217, /* (195) term ::= CTIME_KW */
+ 218, /* (196) expr ::= LP nexprlist COMMA expr RP */
+ 218, /* (197) expr ::= expr AND expr */
+ 218, /* (198) expr ::= expr OR expr */
+ 218, /* (199) expr ::= expr LT|GT|GE|LE expr */
+ 218, /* (200) expr ::= expr EQ|NE expr */
+ 218, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ 218, /* (202) expr ::= expr PLUS|MINUS expr */
+ 218, /* (203) expr ::= expr STAR|SLASH|REM expr */
+ 218, /* (204) expr ::= expr CONCAT expr */
+ 276, /* (205) likeop ::= NOT LIKE_KW|MATCH */
+ 218, /* (206) expr ::= expr likeop expr */
+ 218, /* (207) expr ::= expr likeop expr ESCAPE expr */
+ 218, /* (208) expr ::= expr ISNULL|NOTNULL */
+ 218, /* (209) expr ::= expr NOT NULL */
+ 218, /* (210) expr ::= expr IS expr */
+ 218, /* (211) expr ::= expr IS NOT expr */
+ 218, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */
+ 218, /* (213) expr ::= expr IS DISTINCT FROM expr */
+ 218, /* (214) expr ::= NOT expr */
+ 218, /* (215) expr ::= BITNOT expr */
+ 218, /* (216) expr ::= PLUS|MINUS expr */
+ 218, /* (217) expr ::= expr PTR expr */
+ 277, /* (218) between_op ::= BETWEEN */
+ 277, /* (219) between_op ::= NOT BETWEEN */
+ 218, /* (220) expr ::= expr between_op expr AND expr */
+ 278, /* (221) in_op ::= IN */
+ 278, /* (222) in_op ::= NOT IN */
+ 218, /* (223) expr ::= expr in_op LP exprlist RP */
+ 218, /* (224) expr ::= LP select RP */
+ 218, /* (225) expr ::= expr in_op LP select RP */
+ 218, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */
+ 218, /* (227) expr ::= EXISTS LP select RP */
+ 218, /* (228) expr ::= CASE case_operand case_exprlist case_else END */
+ 281, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ 281, /* (230) case_exprlist ::= WHEN expr THEN expr */
+ 282, /* (231) case_else ::= ELSE expr */
+ 282, /* (232) case_else ::= */
+ 280, /* (233) case_operand ::= */
+ 263, /* (234) exprlist ::= */
+ 254, /* (235) nexprlist ::= nexprlist COMMA expr */
+ 254, /* (236) nexprlist ::= expr */
+ 279, /* (237) paren_exprlist ::= */
+ 279, /* (238) paren_exprlist ::= LP exprlist RP */
+ 191, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ 283, /* (240) uniqueflag ::= UNIQUE */
+ 283, /* (241) uniqueflag ::= */
+ 222, /* (242) eidlist_opt ::= */
+ 222, /* (243) eidlist_opt ::= LP eidlist RP */
+ 233, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */
+ 233, /* (245) eidlist ::= nm collate sortorder */
+ 284, /* (246) collate ::= */
+ 284, /* (247) collate ::= COLLATE ID|STRING */
+ 191, /* (248) cmd ::= DROP INDEX ifexists fullname */
+ 191, /* (249) cmd ::= VACUUM vinto */
+ 191, /* (250) cmd ::= VACUUM nm vinto */
+ 285, /* (251) vinto ::= INTO expr */
+ 285, /* (252) vinto ::= */
+ 191, /* (253) cmd ::= PRAGMA nm dbnm */
+ 191, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ 191, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ 191, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ 191, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ 212, /* (258) plus_num ::= PLUS INTEGER|FLOAT */
+ 213, /* (259) minus_num ::= MINUS INTEGER|FLOAT */
+ 191, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ 287, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ 289, /* (262) trigger_time ::= BEFORE|AFTER */
+ 289, /* (263) trigger_time ::= INSTEAD OF */
+ 289, /* (264) trigger_time ::= */
+ 290, /* (265) trigger_event ::= DELETE|INSERT */
+ 290, /* (266) trigger_event ::= UPDATE */
+ 290, /* (267) trigger_event ::= UPDATE OF idlist */
+ 292, /* (268) when_clause ::= */
+ 292, /* (269) when_clause ::= WHEN expr */
+ 288, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ 288, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */
+ 294, /* (272) trnm ::= nm DOT nm */
+ 295, /* (273) tridxby ::= INDEXED BY nm */
+ 295, /* (274) tridxby ::= NOT INDEXED */
+ 293, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ 293, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ 293, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ 293, /* (278) trigger_cmd ::= scanpt select scanpt */
+ 218, /* (279) expr ::= RAISE LP IGNORE RP */
+ 218, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */
+ 237, /* (281) raisetype ::= ROLLBACK */
+ 237, /* (282) raisetype ::= ABORT */
+ 237, /* (283) raisetype ::= FAIL */
+ 191, /* (284) cmd ::= DROP TRIGGER ifexists fullname */
+ 191, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ 191, /* (286) cmd ::= DETACH database_kw_opt expr */
+ 297, /* (287) key_opt ::= */
+ 297, /* (288) key_opt ::= KEY expr */
+ 191, /* (289) cmd ::= REINDEX */
+ 191, /* (290) cmd ::= REINDEX nm dbnm */
+ 191, /* (291) cmd ::= ANALYZE */
+ 191, /* (292) cmd ::= ANALYZE nm dbnm */
+ 191, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ 191, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ 191, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ 298, /* (296) add_column_fullname ::= fullname */
+ 191, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ 191, /* (298) cmd ::= create_vtab */
+ 191, /* (299) cmd ::= create_vtab LP vtabarglist RP */
+ 300, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 302, /* (301) vtabarg ::= */
+ 303, /* (302) vtabargtoken ::= ANY */
+ 303, /* (303) vtabargtoken ::= lp anylist RP */
+ 304, /* (304) lp ::= LP */
+ 268, /* (305) with ::= WITH wqlist */
+ 268, /* (306) with ::= WITH RECURSIVE wqlist */
+ 307, /* (307) wqas ::= AS */
+ 307, /* (308) wqas ::= AS MATERIALIZED */
+ 307, /* (309) wqas ::= AS NOT MATERIALIZED */
+ 306, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */
+ 308, /* (311) withnm ::= nm */
+ 242, /* (312) wqlist ::= wqitem */
+ 242, /* (313) wqlist ::= wqlist COMMA wqitem */
+ 309, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ 310, /* (315) windowdefn ::= nm AS LP window RP */
+ 311, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ 311, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ 311, /* (318) window ::= ORDER BY sortlist frame_opt */
+ 311, /* (319) window ::= nm ORDER BY sortlist frame_opt */
+ 311, /* (320) window ::= nm frame_opt */
+ 312, /* (321) frame_opt ::= */
+ 312, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+ 312, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+ 316, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */
+ 318, /* (325) frame_bound_s ::= frame_bound */
+ 318, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */
+ 319, /* (327) frame_bound_e ::= frame_bound */
+ 319, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */
+ 317, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */
+ 317, /* (330) frame_bound ::= CURRENT ROW */
+ 320, /* (331) frame_exclude_opt ::= */
+ 320, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */
+ 321, /* (333) frame_exclude ::= NO OTHERS */
+ 321, /* (334) frame_exclude ::= CURRENT ROW */
+ 321, /* (335) frame_exclude ::= GROUP|TIES */
+ 252, /* (336) window_clause ::= WINDOW windowdefn_list */
+ 275, /* (337) filter_over ::= filter_clause over_clause */
+ 275, /* (338) filter_over ::= over_clause */
+ 275, /* (339) filter_over ::= filter_clause */
+ 315, /* (340) over_clause ::= OVER LP window RP */
+ 315, /* (341) over_clause ::= OVER nm */
+ 314, /* (342) filter_clause ::= FILTER LP WHERE expr RP */
+ 217, /* (343) term ::= QNUMBER */
+ 186, /* (344) input ::= cmdlist */
+ 187, /* (345) cmdlist ::= cmdlist ecmd */
+ 187, /* (346) cmdlist ::= ecmd */
+ 188, /* (347) ecmd ::= SEMI */
+ 188, /* (348) ecmd ::= cmdx SEMI */
+ 188, /* (349) ecmd ::= explain cmdx SEMI */
+ 193, /* (350) trans_opt ::= */
+ 193, /* (351) trans_opt ::= TRANSACTION */
+ 193, /* (352) trans_opt ::= TRANSACTION nm */
+ 195, /* (353) savepoint_opt ::= SAVEPOINT */
+ 195, /* (354) savepoint_opt ::= */
+ 191, /* (355) cmd ::= create_table create_table_args */
+ 204, /* (356) table_option_set ::= table_option */
+ 202, /* (357) columnlist ::= columnlist COMMA columnname carglist */
+ 202, /* (358) columnlist ::= columnname carglist */
+ 194, /* (359) nm ::= ID|INDEXED|JOIN_KW */
+ 194, /* (360) nm ::= STRING */
+ 209, /* (361) typetoken ::= typename */
+ 210, /* (362) typename ::= ID|STRING */
+ 211, /* (363) signed ::= plus_num */
+ 211, /* (364) signed ::= minus_num */
+ 208, /* (365) carglist ::= carglist ccons */
+ 208, /* (366) carglist ::= */
+ 216, /* (367) ccons ::= NULL onconf */
+ 216, /* (368) ccons ::= GENERATED ALWAYS AS generated */
+ 216, /* (369) ccons ::= AS generated */
+ 203, /* (370) conslist_opt ::= COMMA conslist */
+ 229, /* (371) conslist ::= conslist tconscomma tcons */
+ 229, /* (372) conslist ::= tcons */
+ 230, /* (373) tconscomma ::= */
+ 234, /* (374) defer_subclause_opt ::= defer_subclause */
+ 236, /* (375) resolvetype ::= raisetype */
+ 240, /* (376) selectnowith ::= oneselect */
+ 241, /* (377) oneselect ::= values */
+ 256, /* (378) sclp ::= selcollist COMMA */
+ 257, /* (379) as ::= ID|STRING */
+ 266, /* (380) indexed_opt ::= indexed_by */
+ 274, /* (381) returning ::= */
+ 218, /* (382) expr ::= term */
+ 276, /* (383) likeop ::= LIKE_KW|MATCH */
+ 280, /* (384) case_operand ::= expr */
+ 263, /* (385) exprlist ::= nexprlist */
+ 286, /* (386) nmnum ::= plus_num */
+ 286, /* (387) nmnum ::= nm */
+ 286, /* (388) nmnum ::= ON */
+ 286, /* (389) nmnum ::= DELETE */
+ 286, /* (390) nmnum ::= DEFAULT */
+ 212, /* (391) plus_num ::= INTEGER|FLOAT */
+ 291, /* (392) foreach_clause ::= */
+ 291, /* (393
foreach_clause ::= FOR EACH ROW */ + 294, /* (394) trnm ::= nm */ + 295, /* (395) tridxby ::= */ + 296, /* (396) database_kw_opt ::= DATABASE */ + 296, /* (397) database_kw_opt ::= */ + 299, /* (398) kwcolumn_opt ::= */ + 299, /* (399) kwcolumn_opt ::= COLUMNKW */ + 301, /* (400) vtabarglist ::= vtabarg */ + 301, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + 302, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 305, /* (403) anylist ::= */ + 305, /* (404) anylist ::= anylist LP anylist RP */ + 305, /* (405) anylist ::= anylist ANY */ + 268, /* (406) with ::= */ + 309, /* (407) windowdefn_list ::= windowdefn */ + 311, /* (408) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -174144,316 +175752,320 @@ static const signed char yyRuleInfoNRhs[] = { -9, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ -10, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ -4, /* (94) values ::= VALUES LP nexprlist RP */ - -5, /* (95) values ::= values COMMA LP nexprlist RP */ - -1, /* (96) distinct ::= DISTINCT */ - -1, /* (97) distinct ::= ALL */ - 0, /* (98) distinct ::= */ - 0, /* (99) sclp ::= */ - -5, /* (100) selcollist ::= sclp scanpt expr scanpt as */ - -3, /* (101) selcollist ::= sclp scanpt STAR */ - -5, /* (102) selcollist ::= sclp scanpt nm DOT STAR */ - -2, /* (103) as ::= AS nm */ - 0, /* (104) as ::= */ - 0, /* (105) from ::= */ - -2, /* (106) from ::= FROM seltablist */ - -2, /* (107) stl_prefix ::= seltablist joinop */ - 0, /* (108) stl_prefix ::= */ - -5, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */ - -6, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - -8, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - -6, /* (112) seltablist ::= stl_prefix LP select RP as on_using */ - -6, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 0, /* (114) dbnm ::= */ - -2, /* (115) dbnm ::= DOT nm */ - -1, /* (116) fullname ::= nm */ - -3, /* (117) fullname ::= nm DOT nm */ - -1, /* (118) xfullname ::= nm */ - -3, /* (119) xfullname ::= nm DOT nm */ - -5, /* (120) xfullname ::= nm DOT nm AS nm */ - -3, /* (121) xfullname ::= nm AS nm */ - -1, /* (122) joinop ::= COMMA|JOIN */ - -2, /* (123) joinop ::= JOIN_KW JOIN */ - -3, /* (124) joinop ::= JOIN_KW nm JOIN */ - -4, /* (125) joinop ::= JOIN_KW nm nm JOIN */ - -2, /* (126) on_using ::= ON expr */ - -4, /* (127) on_using ::= USING LP idlist RP */ - 0, /* (128) on_using ::= */ - 0, /* (129) indexed_opt ::= */ - -3, /* (130) indexed_by ::= INDEXED BY nm */ - -2, /* (131) indexed_by ::= NOT INDEXED */ - 0, /* (132) orderby_opt ::= */ - -3, /* (133) orderby_opt ::= ORDER BY sortlist */ - -5, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */ - -3, /* (135) sortlist ::= expr sortorder nulls */ - -1, /* (136) sortorder ::= ASC */ - -1, /* (137) sortorder ::= DESC */ - 0, /* (138) sortorder ::= */ - -2, /* (139) nulls ::= NULLS FIRST */ - -2, /* (140) nulls ::= NULLS LAST */ - 0, /* (141) nulls ::= */ - 0, /* (142) groupby_opt ::= */ - -3, /* (143) groupby_opt ::= GROUP BY nexprlist */ - 0, /* (144) having_opt ::= */ - -2, /* (145) having_opt ::= HAVING expr */ - 0, /* (146) limit_opt ::= */ - -2, /* (147) limit_opt ::= LIMIT expr */ - -4, /* (148) limit_opt ::= LIMIT expr OFFSET expr */ - -4, /* (149) limit_opt ::= LIMIT expr COMMA expr */ - -6, /* (150) cmd ::= with DELETE FROM 
xfullname indexed_opt where_opt_ret */ - 0, /* (151) where_opt ::= */ - -2, /* (152) where_opt ::= WHERE expr */ - 0, /* (153) where_opt_ret ::= */ - -2, /* (154) where_opt_ret ::= WHERE expr */ - -2, /* (155) where_opt_ret ::= RETURNING selcollist */ - -4, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */ - -9, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ - -5, /* (158) setlist ::= setlist COMMA nm EQ expr */ - -7, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */ - -3, /* (160) setlist ::= nm EQ expr */ - -5, /* (161) setlist ::= LP idlist RP EQ expr */ - -7, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - -8, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 0, /* (164) upsert ::= */ - -2, /* (165) upsert ::= RETURNING selcollist */ - -12, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - -9, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - -5, /* (168) upsert ::= ON CONFLICT DO NOTHING returning */ - -8, /* (169) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - -2, /* (170) returning ::= RETURNING selcollist */ - -2, /* (171) insert_cmd ::= INSERT orconf */ - -1, /* (172) insert_cmd ::= REPLACE */ - 0, /* (173) idlist_opt ::= */ - -3, /* (174) idlist_opt ::= LP idlist RP */ - -3, /* (175) idlist ::= idlist COMMA nm */ - -1, /* (176) idlist ::= nm */ - -3, /* (177) expr ::= LP expr RP */ - -1, /* (178) expr ::= ID|INDEXED|JOIN_KW */ - -3, /* (179) expr ::= nm DOT nm */ - -5, /* (180) expr ::= nm DOT nm DOT nm */ - -1, /* (181) term ::= NULL|FLOAT|BLOB */ - -1, /* (182) term ::= STRING */ - -1, /* (183) term ::= INTEGER */ - -1, /* (184) expr ::= VARIABLE */ - -3, /* (185) expr ::= expr COLLATE ID|STRING */ - -6, /* (186) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - -8, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - -4, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - -6, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - -9, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - -5, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - -1, /* (193) term ::= CTIME_KW */ - -5, /* (194) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (195) expr ::= expr AND expr */ - -3, /* (196) expr ::= expr OR expr */ - -3, /* (197) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (198) expr ::= expr EQ|NE expr */ - -3, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (200) expr ::= expr PLUS|MINUS expr */ - -3, /* (201) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (202) expr ::= expr CONCAT expr */ - -2, /* (203) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (204) expr ::= expr likeop expr */ - -5, /* (205) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (206) expr ::= expr ISNULL|NOTNULL */ - -3, /* (207) expr ::= expr NOT NULL */ - -3, /* (208) expr ::= expr IS expr */ - -4, /* (209) expr ::= expr IS NOT expr */ - -6, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ - -5, /* (211) expr ::= expr IS DISTINCT FROM expr */ - -2, /* (212) expr ::= NOT expr */ - -2, /* (213) expr ::= BITNOT expr */ - -2, /* (214) expr ::= PLUS|MINUS expr */ - -3, /* (215) expr ::= expr PTR expr */ - -1, /* (216) between_op ::= BETWEEN */ - -2, /* (217) between_op ::= NOT BETWEEN */ - -5, /* 
(218) expr ::= expr between_op expr AND expr */ - -1, /* (219) in_op ::= IN */ - -2, /* (220) in_op ::= NOT IN */ - -5, /* (221) expr ::= expr in_op LP exprlist RP */ - -3, /* (222) expr ::= LP select RP */ - -5, /* (223) expr ::= expr in_op LP select RP */ - -5, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (225) expr ::= EXISTS LP select RP */ - -5, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (228) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (229) case_else ::= ELSE expr */ - 0, /* (230) case_else ::= */ - 0, /* (231) case_operand ::= */ - 0, /* (232) exprlist ::= */ - -3, /* (233) nexprlist ::= nexprlist COMMA expr */ - -1, /* (234) nexprlist ::= expr */ - 0, /* (235) paren_exprlist ::= */ - -3, /* (236) paren_exprlist ::= LP exprlist RP */ - -12, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - -1, /* (238) uniqueflag ::= UNIQUE */ - 0, /* (239) uniqueflag ::= */ - 0, /* (240) eidlist_opt ::= */ - -3, /* (241) eidlist_opt ::= LP eidlist RP */ - -5, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */ - -3, /* (243) eidlist ::= nm collate sortorder */ - 0, /* (244) collate ::= */ - -2, /* (245) collate ::= COLLATE ID|STRING */ - -4, /* (246) cmd ::= DROP INDEX ifexists fullname */ - -2, /* (247) cmd ::= VACUUM vinto */ - -3, /* (248) cmd ::= VACUUM nm vinto */ - -2, /* (249) vinto ::= INTO expr */ - 0, /* (250) vinto ::= */ - -3, /* (251) cmd ::= PRAGMA nm dbnm */ - -5, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */ - -6, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - -5, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */ - -6, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - -2, /* (256) plus_num ::= PLUS INTEGER|FLOAT */ - -2, /* (257) minus_num ::= MINUS INTEGER|FLOAT */ - -5, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - -11, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - -1, /* (260) trigger_time ::= BEFORE|AFTER */ - -2, /* (261) trigger_time ::= INSTEAD OF */ - 0, /* (262) trigger_time ::= */ - -1, /* (263) trigger_event ::= DELETE|INSERT */ - -1, /* (264) trigger_event ::= UPDATE */ - -3, /* (265) trigger_event ::= UPDATE OF idlist */ - 0, /* (266) when_clause ::= */ - -2, /* (267) when_clause ::= WHEN expr */ - -3, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - -2, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */ - -3, /* (270) trnm ::= nm DOT nm */ - -3, /* (271) tridxby ::= INDEXED BY nm */ - -2, /* (272) tridxby ::= NOT INDEXED */ - -9, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - -8, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - -6, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - -3, /* (276) trigger_cmd ::= scanpt select scanpt */ - -4, /* (277) expr ::= RAISE LP IGNORE RP */ - -6, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */ - -1, /* (279) raisetype ::= ROLLBACK */ - -1, /* (280) raisetype ::= ABORT */ - -1, /* (281) raisetype ::= FAIL */ - -4, /* (282) cmd ::= DROP TRIGGER ifexists fullname */ - -6, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - -3, /* (284) cmd ::= DETACH database_kw_opt expr */ - 0, /* (285) key_opt ::= */ - -2, /* (286) key_opt ::= KEY expr */ - -1, /* (287) cmd ::= REINDEX */ - -3, /* (288) cmd ::= REINDEX 
nm dbnm */ - -1, /* (289) cmd ::= ANALYZE */ - -3, /* (290) cmd ::= ANALYZE nm dbnm */ - -6, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */ - -7, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - -6, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - -1, /* (294) add_column_fullname ::= fullname */ - -8, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - -1, /* (296) cmd ::= create_vtab */ - -4, /* (297) cmd ::= create_vtab LP vtabarglist RP */ - -8, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 0, /* (299) vtabarg ::= */ - -1, /* (300) vtabargtoken ::= ANY */ - -3, /* (301) vtabargtoken ::= lp anylist RP */ - -1, /* (302) lp ::= LP */ - -2, /* (303) with ::= WITH wqlist */ - -3, /* (304) with ::= WITH RECURSIVE wqlist */ - -1, /* (305) wqas ::= AS */ - -2, /* (306) wqas ::= AS MATERIALIZED */ - -3, /* (307) wqas ::= AS NOT MATERIALIZED */ - -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ - -1, /* (309) wqlist ::= wqitem */ - -3, /* (310) wqlist ::= wqlist COMMA wqitem */ - -3, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (312) windowdefn ::= nm AS LP window RP */ - -5, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (315) window ::= ORDER BY sortlist frame_opt */ - -5, /* (316) window ::= nm ORDER BY sortlist frame_opt */ - -2, /* (317) window ::= nm frame_opt */ - 0, /* (318) frame_opt ::= */ - -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (322) frame_bound_s ::= frame_bound */ - -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (324) frame_bound_e ::= frame_bound */ - -2, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (327) frame_bound ::= CURRENT ROW */ - 0, /* (328) frame_exclude_opt ::= */ - -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (330) frame_exclude ::= NO OTHERS */ - -2, /* (331) frame_exclude ::= CURRENT ROW */ - -1, /* (332) frame_exclude ::= GROUP|TIES */ - -2, /* (333) window_clause ::= WINDOW windowdefn_list */ - -2, /* (334) filter_over ::= filter_clause over_clause */ - -1, /* (335) filter_over ::= over_clause */ - -1, /* (336) filter_over ::= filter_clause */ - -4, /* (337) over_clause ::= OVER LP window RP */ - -2, /* (338) over_clause ::= OVER nm */ - -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (340) input ::= cmdlist */ - -2, /* (341) cmdlist ::= cmdlist ecmd */ - -1, /* (342) cmdlist ::= ecmd */ - -1, /* (343) ecmd ::= SEMI */ - -2, /* (344) ecmd ::= cmdx SEMI */ - -3, /* (345) ecmd ::= explain cmdx SEMI */ - 0, /* (346) trans_opt ::= */ - -1, /* (347) trans_opt ::= TRANSACTION */ - -2, /* (348) trans_opt ::= TRANSACTION nm */ - -1, /* (349) savepoint_opt ::= SAVEPOINT */ - 0, /* (350) savepoint_opt ::= */ - -2, /* (351) cmd ::= create_table create_table_args */ - -1, /* (352) table_option_set ::= table_option */ - -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (354) columnlist ::= columnname carglist */ - -1, /* (355) nm ::= ID|INDEXED|JOIN_KW */ - -1, /* (356) nm ::= STRING */ - -1, /* (357) typetoken ::= typename */ - -1, /* (358) typename ::= 
ID|STRING */ - -1, /* (359) signed ::= plus_num */ - -1, /* (360) signed ::= minus_num */ - -2, /* (361) carglist ::= carglist ccons */ - 0, /* (362) carglist ::= */ - -2, /* (363) ccons ::= NULL onconf */ - -4, /* (364) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (365) ccons ::= AS generated */ - -2, /* (366) conslist_opt ::= COMMA conslist */ - -3, /* (367) conslist ::= conslist tconscomma tcons */ - -1, /* (368) conslist ::= tcons */ - 0, /* (369) tconscomma ::= */ - -1, /* (370) defer_subclause_opt ::= defer_subclause */ - -1, /* (371) resolvetype ::= raisetype */ - -1, /* (372) selectnowith ::= oneselect */ - -1, /* (373) oneselect ::= values */ - -2, /* (374) sclp ::= selcollist COMMA */ - -1, /* (375) as ::= ID|STRING */ - -1, /* (376) indexed_opt ::= indexed_by */ - 0, /* (377) returning ::= */ - -1, /* (378) expr ::= term */ - -1, /* (379) likeop ::= LIKE_KW|MATCH */ - -1, /* (380) case_operand ::= expr */ - -1, /* (381) exprlist ::= nexprlist */ - -1, /* (382) nmnum ::= plus_num */ - -1, /* (383) nmnum ::= nm */ - -1, /* (384) nmnum ::= ON */ - -1, /* (385) nmnum ::= DELETE */ - -1, /* (386) nmnum ::= DEFAULT */ - -1, /* (387) plus_num ::= INTEGER|FLOAT */ - 0, /* (388) foreach_clause ::= */ - -3, /* (389) foreach_clause ::= FOR EACH ROW */ - -1, /* (390) trnm ::= nm */ - 0, /* (391) tridxby ::= */ - -1, /* (392) database_kw_opt ::= DATABASE */ - 0, /* (393) database_kw_opt ::= */ - 0, /* (394) kwcolumn_opt ::= */ - -1, /* (395) kwcolumn_opt ::= COLUMNKW */ - -1, /* (396) vtabarglist ::= vtabarg */ - -3, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (398) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (399) anylist ::= */ - -4, /* (400) anylist ::= anylist LP anylist RP */ - -2, /* (401) anylist ::= anylist ANY */ - 0, /* (402) with ::= */ - -1, /* (403) windowdefn_list ::= windowdefn */ - -1, /* (404) window ::= frame_opt */ + -1, /* (95) oneselect ::= mvalues */ + -5, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + -5, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + -1, /* (98) distinct ::= DISTINCT */ + -1, /* (99) distinct ::= ALL */ + 0, /* (100) distinct ::= */ + 0, /* (101) sclp ::= */ + -5, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + -3, /* (103) selcollist ::= sclp scanpt STAR */ + -5, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + -2, /* (105) as ::= AS nm */ + 0, /* (106) as ::= */ + 0, /* (107) from ::= */ + -2, /* (108) from ::= FROM seltablist */ + -2, /* (109) stl_prefix ::= seltablist joinop */ + 0, /* (110) stl_prefix ::= */ + -5, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + -6, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + -8, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + -6, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + -6, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 0, /* (116) dbnm ::= */ + -2, /* (117) dbnm ::= DOT nm */ + -1, /* (118) fullname ::= nm */ + -3, /* (119) fullname ::= nm DOT nm */ + -1, /* (120) xfullname ::= nm */ + -3, /* (121) xfullname ::= nm DOT nm */ + -5, /* (122) xfullname ::= nm DOT nm AS nm */ + -3, /* (123) xfullname ::= nm AS nm */ + -1, /* (124) joinop ::= COMMA|JOIN */ + -2, /* (125) joinop ::= JOIN_KW JOIN */ + -3, /* (126) joinop ::= JOIN_KW nm JOIN */ + -4, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + -2, /* (128) on_using ::= ON expr */ + -4, /* (129) on_using ::= USING LP idlist RP */ + 0, /* (130) on_using ::= */ + 0, /* (131) indexed_opt ::= */ + -3, /* (132) 
indexed_by ::= INDEXED BY nm */ + -2, /* (133) indexed_by ::= NOT INDEXED */ + 0, /* (134) orderby_opt ::= */ + -3, /* (135) orderby_opt ::= ORDER BY sortlist */ + -5, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + -3, /* (137) sortlist ::= expr sortorder nulls */ + -1, /* (138) sortorder ::= ASC */ + -1, /* (139) sortorder ::= DESC */ + 0, /* (140) sortorder ::= */ + -2, /* (141) nulls ::= NULLS FIRST */ + -2, /* (142) nulls ::= NULLS LAST */ + 0, /* (143) nulls ::= */ + 0, /* (144) groupby_opt ::= */ + -3, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 0, /* (146) having_opt ::= */ + -2, /* (147) having_opt ::= HAVING expr */ + 0, /* (148) limit_opt ::= */ + -2, /* (149) limit_opt ::= LIMIT expr */ + -4, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + -4, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + -6, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ + 0, /* (153) where_opt ::= */ + -2, /* (154) where_opt ::= WHERE expr */ + 0, /* (155) where_opt_ret ::= */ + -2, /* (156) where_opt_ret ::= WHERE expr */ + -2, /* (157) where_opt_ret ::= RETURNING selcollist */ + -4, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + -9, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + -5, /* (160) setlist ::= setlist COMMA nm EQ expr */ + -7, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + -3, /* (162) setlist ::= nm EQ expr */ + -5, /* (163) setlist ::= LP idlist RP EQ expr */ + -7, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + -8, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 0, /* (166) upsert ::= */ + -2, /* (167) upsert ::= RETURNING selcollist */ + -12, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + -9, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + -5, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + -8, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + -2, /* (172) returning ::= RETURNING selcollist */ + -2, /* (173) insert_cmd ::= INSERT orconf */ + -1, /* (174) insert_cmd ::= REPLACE */ + 0, /* (175) idlist_opt ::= */ + -3, /* (176) idlist_opt ::= LP idlist RP */ + -3, /* (177) idlist ::= idlist COMMA nm */ + -1, /* (178) idlist ::= nm */ + -3, /* (179) expr ::= LP expr RP */ + -1, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + -3, /* (181) expr ::= nm DOT nm */ + -5, /* (182) expr ::= nm DOT nm DOT nm */ + -1, /* (183) term ::= NULL|FLOAT|BLOB */ + -1, /* (184) term ::= STRING */ + -1, /* (185) term ::= INTEGER */ + -1, /* (186) expr ::= VARIABLE */ + -3, /* (187) expr ::= expr COLLATE ID|STRING */ + -6, /* (188) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + -8, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + -4, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + -6, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + -9, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + -5, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + -1, /* (195) term ::= CTIME_KW */ + -5, /* (196) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (197) expr ::= expr AND expr */ + -3, /* (198) expr ::= expr OR expr */ + -3, /* (199) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (200) expr ::= expr EQ|NE expr */ + -3, /* (201) 
expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (202) expr ::= expr PLUS|MINUS expr */ + -3, /* (203) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (204) expr ::= expr CONCAT expr */ + -2, /* (205) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (206) expr ::= expr likeop expr */ + -5, /* (207) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (208) expr ::= expr ISNULL|NOTNULL */ + -3, /* (209) expr ::= expr NOT NULL */ + -3, /* (210) expr ::= expr IS expr */ + -4, /* (211) expr ::= expr IS NOT expr */ + -6, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + -5, /* (213) expr ::= expr IS DISTINCT FROM expr */ + -2, /* (214) expr ::= NOT expr */ + -2, /* (215) expr ::= BITNOT expr */ + -2, /* (216) expr ::= PLUS|MINUS expr */ + -3, /* (217) expr ::= expr PTR expr */ + -1, /* (218) between_op ::= BETWEEN */ + -2, /* (219) between_op ::= NOT BETWEEN */ + -5, /* (220) expr ::= expr between_op expr AND expr */ + -1, /* (221) in_op ::= IN */ + -2, /* (222) in_op ::= NOT IN */ + -5, /* (223) expr ::= expr in_op LP exprlist RP */ + -3, /* (224) expr ::= LP select RP */ + -5, /* (225) expr ::= expr in_op LP select RP */ + -5, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (227) expr ::= EXISTS LP select RP */ + -5, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (230) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (231) case_else ::= ELSE expr */ + 0, /* (232) case_else ::= */ + 0, /* (233) case_operand ::= */ + 0, /* (234) exprlist ::= */ + -3, /* (235) nexprlist ::= nexprlist COMMA expr */ + -1, /* (236) nexprlist ::= expr */ + 0, /* (237) paren_exprlist ::= */ + -3, /* (238) paren_exprlist ::= LP exprlist RP */ + -12, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + -1, /* (240) uniqueflag ::= UNIQUE */ + 0, /* (241) uniqueflag ::= */ + 0, /* (242) eidlist_opt ::= */ + -3, /* (243) eidlist_opt ::= LP eidlist RP */ + -5, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + -3, /* (245) eidlist ::= nm collate sortorder */ + 0, /* (246) collate ::= */ + -2, /* (247) collate ::= COLLATE ID|STRING */ + -4, /* (248) cmd ::= DROP INDEX ifexists fullname */ + -2, /* (249) cmd ::= VACUUM vinto */ + -3, /* (250) cmd ::= VACUUM nm vinto */ + -2, /* (251) vinto ::= INTO expr */ + 0, /* (252) vinto ::= */ + -3, /* (253) cmd ::= PRAGMA nm dbnm */ + -5, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + -6, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + -5, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + -6, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + -2, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + -2, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + -5, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + -11, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + -1, /* (262) trigger_time ::= BEFORE|AFTER */ + -2, /* (263) trigger_time ::= INSTEAD OF */ + 0, /* (264) trigger_time ::= */ + -1, /* (265) trigger_event ::= DELETE|INSERT */ + -1, /* (266) trigger_event ::= UPDATE */ + -3, /* (267) trigger_event ::= UPDATE OF idlist */ + 0, /* (268) when_clause ::= */ + -2, /* (269) when_clause ::= WHEN expr */ + -3, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + -2, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + -3, /* (272) trnm ::= nm DOT nm */ + -3, /* (273) tridxby ::= INDEXED BY nm */ + -2, /* (274) 
tridxby ::= NOT INDEXED */ + -9, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + -8, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + -6, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + -3, /* (278) trigger_cmd ::= scanpt select scanpt */ + -4, /* (279) expr ::= RAISE LP IGNORE RP */ + -6, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + -1, /* (281) raisetype ::= ROLLBACK */ + -1, /* (282) raisetype ::= ABORT */ + -1, /* (283) raisetype ::= FAIL */ + -4, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ + -6, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + -3, /* (286) cmd ::= DETACH database_kw_opt expr */ + 0, /* (287) key_opt ::= */ + -2, /* (288) key_opt ::= KEY expr */ + -1, /* (289) cmd ::= REINDEX */ + -3, /* (290) cmd ::= REINDEX nm dbnm */ + -1, /* (291) cmd ::= ANALYZE */ + -3, /* (292) cmd ::= ANALYZE nm dbnm */ + -6, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + -7, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + -6, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + -1, /* (296) add_column_fullname ::= fullname */ + -8, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + -1, /* (298) cmd ::= create_vtab */ + -4, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + -8, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 0, /* (301) vtabarg ::= */ + -1, /* (302) vtabargtoken ::= ANY */ + -3, /* (303) vtabargtoken ::= lp anylist RP */ + -1, /* (304) lp ::= LP */ + -2, /* (305) with ::= WITH wqlist */ + -3, /* (306) with ::= WITH RECURSIVE wqlist */ + -1, /* (307) wqas ::= AS */ + -2, /* (308) wqas ::= AS MATERIALIZED */ + -3, /* (309) wqas ::= AS NOT MATERIALIZED */ + -6, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + -1, /* (311) withnm ::= nm */ + -1, /* (312) wqlist ::= wqitem */ + -3, /* (313) wqlist ::= wqlist COMMA wqitem */ + -3, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (315) windowdefn ::= nm AS LP window RP */ + -5, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (318) window ::= ORDER BY sortlist frame_opt */ + -5, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + -2, /* (320) window ::= nm frame_opt */ + 0, /* (321) frame_opt ::= */ + -3, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (325) frame_bound_s ::= frame_bound */ + -2, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (327) frame_bound_e ::= frame_bound */ + -2, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (330) frame_bound ::= CURRENT ROW */ + 0, /* (331) frame_exclude_opt ::= */ + -2, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (333) frame_exclude ::= NO OTHERS */ + -2, /* (334) frame_exclude ::= CURRENT ROW */ + -1, /* (335) frame_exclude ::= GROUP|TIES */ + -2, /* (336) window_clause ::= WINDOW windowdefn_list */ + -2, /* (337) filter_over ::= filter_clause over_clause */ + -1, /* (338) filter_over ::= over_clause */ + -1, /* (339) filter_over ::= filter_clause */ + -4, /* (340) over_clause ::= OVER LP window RP */ 
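The two parallel per-rule tables being rewritten in these hunks drive every reduce step of the generated parser: yyRuleInfoLhs[J] (the table above) holds the symbol code of the nonterminal on the left-hand side of rule J, and yyRuleInfoNRhs[J] holds, per its header comment, the negative of rule J's right-hand-side length. In the Lemon template the pair is consulted roughly as yygoto = yyRuleInfoLhs[yyruleno]; yysize = yyRuleInfoNRhs[yyruleno]; after which the stack pointer is advanced by yysize+1, popping the whole RHS and pushing the LHS in a single addition. A minimal, self-contained toy illustrating why storing the negated length makes that arithmetic work — the symbol codes and rule numbers here are invented for the demo, not SQLite's:

  #include <stdio.h>

  /* Toy sketch (not SQLite's code): one entry per grammar rule.
  ** toyLhs[J]  = symbol code of rule J's left-hand-side nonterminal.
  ** toyNRhs[J] = NEGATIVE of rule J's right-hand-side length, so a
  ** reduce can pop the RHS by simply adding it to the stack index. */
  static const unsigned short toyLhs[]  = { 240, 241 };
  static const signed char    toyNRhs[] = {  -1,  -4 };

  int main(void){
    int stackTop = 10;             /* pretend parse-stack index        */
    int rule = 1;                  /* reducing by toy rule 1 (4 RHS)   */
    int lhs  = toyLhs[rule];       /* symbol to take the goto on       */
    stackTop += toyNRhs[rule] + 1; /* pop 4 RHS entries, push 1 LHS    */
    printf("goto on symbol %d, new stack top %d\n", lhs, stackTop);
    return 0;
  }

Storing -n rather than n keeps the hot reduce path to one add with no negation; the rule renumbering visible in the comments (old rule (95) onward shifting by two, and by four past (343)) is just the side effect of the rules this grammar change inserts.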
+ -2, /* (341) over_clause ::= OVER nm */ + -5, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (343) term ::= QNUMBER */ + -1, /* (344) input ::= cmdlist */ + -2, /* (345) cmdlist ::= cmdlist ecmd */ + -1, /* (346) cmdlist ::= ecmd */ + -1, /* (347) ecmd ::= SEMI */ + -2, /* (348) ecmd ::= cmdx SEMI */ + -3, /* (349) ecmd ::= explain cmdx SEMI */ + 0, /* (350) trans_opt ::= */ + -1, /* (351) trans_opt ::= TRANSACTION */ + -2, /* (352) trans_opt ::= TRANSACTION nm */ + -1, /* (353) savepoint_opt ::= SAVEPOINT */ + 0, /* (354) savepoint_opt ::= */ + -2, /* (355) cmd ::= create_table create_table_args */ + -1, /* (356) table_option_set ::= table_option */ + -4, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (358) columnlist ::= columnname carglist */ + -1, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + -1, /* (360) nm ::= STRING */ + -1, /* (361) typetoken ::= typename */ + -1, /* (362) typename ::= ID|STRING */ + -1, /* (363) signed ::= plus_num */ + -1, /* (364) signed ::= minus_num */ + -2, /* (365) carglist ::= carglist ccons */ + 0, /* (366) carglist ::= */ + -2, /* (367) ccons ::= NULL onconf */ + -4, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (369) ccons ::= AS generated */ + -2, /* (370) conslist_opt ::= COMMA conslist */ + -3, /* (371) conslist ::= conslist tconscomma tcons */ + -1, /* (372) conslist ::= tcons */ + 0, /* (373) tconscomma ::= */ + -1, /* (374) defer_subclause_opt ::= defer_subclause */ + -1, /* (375) resolvetype ::= raisetype */ + -1, /* (376) selectnowith ::= oneselect */ + -1, /* (377) oneselect ::= values */ + -2, /* (378) sclp ::= selcollist COMMA */ + -1, /* (379) as ::= ID|STRING */ + -1, /* (380) indexed_opt ::= indexed_by */ + 0, /* (381) returning ::= */ + -1, /* (382) expr ::= term */ + -1, /* (383) likeop ::= LIKE_KW|MATCH */ + -1, /* (384) case_operand ::= expr */ + -1, /* (385) exprlist ::= nexprlist */ + -1, /* (386) nmnum ::= plus_num */ + -1, /* (387) nmnum ::= nm */ + -1, /* (388) nmnum ::= ON */ + -1, /* (389) nmnum ::= DELETE */ + -1, /* (390) nmnum ::= DEFAULT */ + -1, /* (391) plus_num ::= INTEGER|FLOAT */ + 0, /* (392) foreach_clause ::= */ + -3, /* (393) foreach_clause ::= FOR EACH ROW */ + -1, /* (394) trnm ::= nm */ + 0, /* (395) tridxby ::= */ + -1, /* (396) database_kw_opt ::= DATABASE */ + 0, /* (397) database_kw_opt ::= */ + 0, /* (398) kwcolumn_opt ::= */ + -1, /* (399) kwcolumn_opt ::= COLUMNKW */ + -1, /* (400) vtabarglist ::= vtabarg */ + -3, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (403) anylist ::= */ + -4, /* (404) anylist ::= anylist LP anylist RP */ + -2, /* (405) anylist ::= anylist ANY */ + 0, /* (406) with ::= */ + -1, /* (407) windowdefn_list ::= windowdefn */ + -1, /* (408) window ::= frame_opt */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -174505,16 +176117,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy394);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy144);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy394 = TK_DEFERRED;} +{yymsp[1].minor.yy144 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321); -{yymsp[0].minor.yy394 = 
yymsp[0].major; /*A-overwrites-X*/} + case 324: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==324); +{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -174537,7 +176149,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy394,0,0,yymsp[-2].minor.yy394); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy144,0,0,yymsp[-2].minor.yy144); } break; case 14: /* createkw ::= CREATE */ @@ -174549,40 +176161,40 @@ static YYACTIONTYPE yy_reduce( case 62: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==62); case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72); case 81: /* ifexists ::= */ yytestcase(yyruleno==81); - case 98: /* distinct ::= */ yytestcase(yyruleno==98); - case 244: /* collate ::= */ yytestcase(yyruleno==244); -{yymsp[1].minor.yy394 = 0;} + case 100: /* distinct ::= */ yytestcase(yyruleno==100); + case 246: /* collate ::= */ yytestcase(yyruleno==246); +{yymsp[1].minor.yy144 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy394 = 1;} +{yymsp[-2].minor.yy144 = 1;} break; case 17: /* temp ::= TEMP */ -{yymsp[0].minor.yy394 = pParse->db->init.busy==0;} +{yymsp[0].minor.yy144 = pParse->db->init.busy==0;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_option_set */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy285,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy391,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy47); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy555); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); } break; case 21: /* table_option_set ::= */ -{yymsp[1].minor.yy285 = 0;} +{yymsp[1].minor.yy391 = 0;} break; case 22: /* table_option_set ::= table_option_set COMMA table_option */ -{yylhsminor.yy285 = yymsp[-2].minor.yy285|yymsp[0].minor.yy285;} - yymsp[-2].minor.yy285 = yylhsminor.yy285; +{yylhsminor.yy391 = yymsp[-2].minor.yy391|yymsp[0].minor.yy391;} + yymsp[-2].minor.yy391 = yylhsminor.yy391; break; case 23: /* table_option ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy285 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy391 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy285 = 0; + yymsp[-1].minor.yy391 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -174590,20 +176202,20 @@ static YYACTIONTYPE yy_reduce( case 24: /* table_option ::= nm */ { if( yymsp[0].minor.yy0.n==6 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"strict",6)==0 ){ - yylhsminor.yy285 = TF_Strict; + yylhsminor.yy391 = TF_Strict; }else{ - yylhsminor.yy285 = 0; + yylhsminor.yy391 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } - yymsp[0].minor.yy285 = yylhsminor.yy285; + yymsp[0].minor.yy391 = yylhsminor.yy391; break; case 25: /* columnname ::= nm typetoken */ {sqlite3AddColumn(pParse,yymsp[-1].minor.yy0,yymsp[0].minor.yy0);} break; case 26: /* typetoken ::= */ case 65: /* conslist_opt 
::= */ yytestcase(yyruleno==65); - case 104: /* as ::= */ yytestcase(yyruleno==104); + case 106: /* as ::= */ yytestcase(yyruleno==106); {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = 0;} break; case 27: /* typetoken ::= typename LP signed RP */ @@ -174622,7 +176234,7 @@ static YYACTIONTYPE yy_reduce( case 30: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy522 = yyLookaheadToken.z; + yymsp[1].minor.yy168 = yyLookaheadToken.z; } break; case 31: /* scantok ::= */ @@ -174636,17 +176248,17 @@ static YYACTIONTYPE yy_reduce( {pParse->constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 35: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 36: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy528, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy454, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -174661,151 +176273,151 @@ static YYACTIONTYPE yy_reduce( } break; case 38: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy394);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy144);} break; case 39: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy394,yymsp[0].minor.yy394,yymsp[-2].minor.yy394);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy144,yymsp[0].minor.yy144,yymsp[-2].minor.yy144);} break; case 40: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy394,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy144,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 41: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 42: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy322,yymsp[0].minor.yy394);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy144);} break; case 43: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy394);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy144);} break; case 44: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 45: /* generated ::= LP expr RP */ -{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy528,0);} +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy454,0);} break; case 46: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy528,&yymsp[0].minor.yy0);} 
+{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy454,&yymsp[0].minor.yy0);} break; case 48: /* autoinc ::= AUTOINCR */ -{yymsp[0].minor.yy394 = 1;} +{yymsp[0].minor.yy144 = 1;} break; case 49: /* refargs ::= */ -{ yymsp[1].minor.yy394 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy144 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 50: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy394 = (yymsp[-1].minor.yy394 & ~yymsp[0].minor.yy231.mask) | yymsp[0].minor.yy231.value; } +{ yymsp[-1].minor.yy144 = (yymsp[-1].minor.yy144 & ~yymsp[0].minor.yy383.mask) | yymsp[0].minor.yy383.value; } break; case 51: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy231.value = 0; yymsp[-1].minor.yy231.mask = 0x000000; } +{ yymsp[-1].minor.yy383.value = 0; yymsp[-1].minor.yy383.mask = 0x000000; } break; case 52: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy231.value = 0; yymsp[-2].minor.yy231.mask = 0x000000; } +{ yymsp[-2].minor.yy383.value = 0; yymsp[-2].minor.yy383.mask = 0x000000; } break; case 53: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy231.value = yymsp[0].minor.yy394; yymsp[-2].minor.yy231.mask = 0x0000ff; } +{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144; yymsp[-2].minor.yy383.mask = 0x0000ff; } break; case 54: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy231.value = yymsp[0].minor.yy394<<8; yymsp[-2].minor.yy231.mask = 0x00ff00; } +{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144<<8; yymsp[-2].minor.yy383.mask = 0x00ff00; } break; case 55: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy394 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_SetNull; /* EV: R-33326-45252 */} break; case 56: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy394 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 57: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy394 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy144 = OE_Cascade; /* EV: R-33326-45252 */} break; case 58: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy394 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy144 = OE_Restrict; /* EV: R-33326-45252 */} break; case 59: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy394 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_None; /* EV: R-33326-45252 */} break; case 60: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy394 = 0;} +{yymsp[-2].minor.yy144 = 0;} break; case 61: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 76: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==76); - case 171: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==171); -{yymsp[-1].minor.yy394 = yymsp[0].minor.yy394;} + case 173: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==173); +{yymsp[-1].minor.yy144 = yymsp[0].minor.yy144;} break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); - case 217: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==217); - case 220: /* in_op ::= NOT IN */ yytestcase(yyruleno==220); - case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245); -{yymsp[-1].minor.yy394 = 1;} + case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); + case 222: /* in_op ::= NOT IN */ yytestcase(yyruleno==222); + case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); +{yymsp[-1].minor.yy144 = 1;} break; case 64: /* init_deferred_pred_opt ::= INITIALLY 
IMMEDIATE */ -{yymsp[-1].minor.yy394 = 0;} +{yymsp[-1].minor.yy144 = 0;} break; case 66: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy322,yymsp[0].minor.yy394,yymsp[-2].minor.yy394,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy144,yymsp[-2].minor.yy144,0);} break; case 69: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy322,yymsp[0].minor.yy394,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy144,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 70: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy528,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy454,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 71: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy322, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[-1].minor.yy394); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy394); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy144); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy144); } break; case 73: /* onconf ::= */ case 75: /* orconf ::= */ yytestcase(yyruleno==75); -{yymsp[1].minor.yy394 = OE_Default;} +{yymsp[1].minor.yy144 = OE_Default;} break; case 74: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy394 = yymsp[0].minor.yy394;} +{yymsp[-2].minor.yy144 = yymsp[0].minor.yy144;} break; case 77: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy394 = OE_Ignore;} +{yymsp[0].minor.yy144 = OE_Ignore;} break; case 78: /* resolvetype ::= REPLACE */ - case 172: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==172); -{yymsp[0].minor.yy394 = OE_Replace;} + case 174: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==174); +{yymsp[0].minor.yy144 = OE_Replace;} break; case 79: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy131, 0, yymsp[-1].minor.yy394); + sqlite3DropTable(pParse, yymsp[0].minor.yy203, 0, yymsp[-1].minor.yy144); } break; case 82: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[0].minor.yy47, yymsp[-7].minor.yy394, yymsp[-5].minor.yy394); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[0].minor.yy555, yymsp[-7].minor.yy144, yymsp[-5].minor.yy144); } break; case 83: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy131, 1, yymsp[-1].minor.yy394); + sqlite3DropTable(pParse, yymsp[0].minor.yy203, 1, yymsp[-1].minor.yy144); } break; case 84: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy47, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); + sqlite3Select(pParse, yymsp[0].minor.yy555, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); } break; case 85: /* select ::= WITH wqlist selectnowith */ -{yymsp[-2].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} +{yymsp[-2].minor.yy555 = 
attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} break; case 86: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{yymsp[-3].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} +{yymsp[-3].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} break; case 87: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy47; + Select *p = yymsp[0].minor.yy555; if( p ){ parserDoubleLinkSelect(pParse, p); } @@ -174813,8 +176425,8 @@ static YYACTIONTYPE yy_reduce( break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy47; - Select *pLhs = yymsp[-2].minor.yy47; + Select *pRhs = yymsp[0].minor.yy555; + Select *pLhs = yymsp[-2].minor.yy555; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -174824,148 +176436,145 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy394; + pRhs->op = (u8)yymsp[-1].minor.yy144; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy394!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy144!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy47 = pRhs; + yymsp[-2].minor.yy555 = pRhs; } break; case 89: /* multiselect_op ::= UNION */ case 91: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==91); -{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-OP*/} break; case 90: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy394 = TK_ALL;} +{yymsp[-1].minor.yy144 = TK_ALL;} break; case 92: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy322,yymsp[-5].minor.yy131,yymsp[-4].minor.yy528,yymsp[-3].minor.yy322,yymsp[-2].minor.yy528,yymsp[-1].minor.yy322,yymsp[-7].minor.yy394,yymsp[0].minor.yy528); + yymsp[-8].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy203,yymsp[-4].minor.yy454,yymsp[-3].minor.yy14,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[-7].minor.yy144,yymsp[0].minor.yy454); } break; case 93: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy322,yymsp[-6].minor.yy131,yymsp[-5].minor.yy528,yymsp[-4].minor.yy322,yymsp[-3].minor.yy528,yymsp[-1].minor.yy322,yymsp[-8].minor.yy394,yymsp[0].minor.yy528); - if( yymsp[-9].minor.yy47 ){ - yymsp[-9].minor.yy47->pWinDefn = yymsp[-2].minor.yy41; + yymsp[-9].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy14,yymsp[-6].minor.yy203,yymsp[-5].minor.yy454,yymsp[-4].minor.yy14,yymsp[-3].minor.yy454,yymsp[-1].minor.yy14,yymsp[-8].minor.yy144,yymsp[0].minor.yy454); + if( yymsp[-9].minor.yy555 ){ + yymsp[-9].minor.yy555->pWinDefn = yymsp[-2].minor.yy211; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy41); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy211); } } break; case 94: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0); } break; - case 95: /* values ::= values COMMA LP nexprlist RP 
 */
+      case 95: /* oneselect ::= mvalues */
 {
-  Select *pRight, *pLeft = yymsp[-4].minor.yy47;
-  pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values|SF_MultiValue,0);
-  if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue;
-  if( pRight ){
-    pRight->op = TK_ALL;
-    pRight->pPrior = pLeft;
-    yymsp[-4].minor.yy47 = pRight;
-  }else{
-    yymsp[-4].minor.yy47 = pLeft;
-  }
+  sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy555);
 }
         break;
-      case 96: /* distinct ::= DISTINCT */
-{yymsp[0].minor.yy394 = SF_Distinct;}
+      case 96: /* mvalues ::= values COMMA LP nexprlist RP */
+      case 97: /* mvalues ::= mvalues COMMA LP nexprlist RP */ yytestcase(yyruleno==97);
+{
+  yymsp[-4].minor.yy555 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy555, yymsp[-1].minor.yy14);
+}
         break;
-      case 97: /* distinct ::= ALL */
-{yymsp[0].minor.yy394 = SF_All;}
+      case 98: /* distinct ::= DISTINCT */
+{yymsp[0].minor.yy144 = SF_Distinct;}
         break;
-      case 99: /* sclp ::= */
-      case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132);
-      case 142: /* groupby_opt ::= */ yytestcase(yyruleno==142);
-      case 232: /* exprlist ::= */ yytestcase(yyruleno==232);
-      case 235: /* paren_exprlist ::= */ yytestcase(yyruleno==235);
-      case 240: /* eidlist_opt ::= */ yytestcase(yyruleno==240);
-{yymsp[1].minor.yy322 = 0;}
+      case 99: /* distinct ::= ALL */
+{yymsp[0].minor.yy144 = SF_All;}
         break;
-      case 100: /* selcollist ::= sclp scanpt expr scanpt as */
+      case 101: /* sclp ::= */
+      case 134: /* orderby_opt ::= */ yytestcase(yyruleno==134);
+      case 144: /* groupby_opt ::= */ yytestcase(yyruleno==144);
+      case 234: /* exprlist ::= */ yytestcase(yyruleno==234);
+      case 237: /* paren_exprlist ::= */ yytestcase(yyruleno==237);
+      case 242: /* eidlist_opt ::= */ yytestcase(yyruleno==242);
+{yymsp[1].minor.yy14 = 0;}
+        break;
+      case 102: /* selcollist ::= sclp scanpt expr scanpt as */
 {
-  yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[-2].minor.yy528);
-  if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[0].minor.yy0, 1);
-  sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy322,yymsp[-3].minor.yy522,yymsp[-1].minor.yy522);
+  yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[-2].minor.yy454);
+  if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[0].minor.yy0, 1);
+  sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy14,yymsp[-3].minor.yy168,yymsp[-1].minor.yy168);
 }
         break;
-      case 101: /* selcollist ::= sclp scanpt STAR */
+      case 103: /* selcollist ::= sclp scanpt STAR */
 {
   Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0);
   sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail));
-  yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p);
+  yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, p);
 }
         break;
-      case 102: /* selcollist ::= sclp scanpt nm DOT STAR */
+      case 104: /* selcollist ::= sclp scanpt nm DOT STAR */
 {
   Expr *pRight, *pLeft, *pDot;
   pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
   sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail));
   pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0);
   pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
-  yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot);
+  yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, pDot);
 }
         break;
-      case 103: /* as ::= AS nm */
-      case 115: /* dbnm ::= DOT nm */ yytestcase(yyruleno==115);
-      case 256: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==256);
-      case 257: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==257);
+      case 105: /* as ::= AS nm */
+      case 117: /* dbnm ::= DOT nm */ yytestcase(yyruleno==117);
+      case 258: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==258);
+      case 259: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==259);
 {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;}
         break;
-      case 105: /* from ::= */
-      case 108: /* stl_prefix ::= */ yytestcase(yyruleno==108);
-{yymsp[1].minor.yy131 = 0;}
+      case 107: /* from ::= */
+      case 110: /* stl_prefix ::= */ yytestcase(yyruleno==110);
+{yymsp[1].minor.yy203 = 0;}
         break;
-      case 106: /* from ::= FROM seltablist */
+      case 108: /* from ::= FROM seltablist */
 {
-  yymsp[-1].minor.yy131 = yymsp[0].minor.yy131;
-  sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy131);
+  yymsp[-1].minor.yy203 = yymsp[0].minor.yy203;
+  sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy203);
 }
         break;
-      case 107: /* stl_prefix ::= seltablist joinop */
+      case 109: /* stl_prefix ::= seltablist joinop */
 {
-  if( ALWAYS(yymsp[-1].minor.yy131 && yymsp[-1].minor.yy131->nSrc>0) ) yymsp[-1].minor.yy131->a[yymsp[-1].minor.yy131->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy394;
+  if( ALWAYS(yymsp[-1].minor.yy203 && yymsp[-1].minor.yy203->nSrc>0) ) yymsp[-1].minor.yy203->a[yymsp[-1].minor.yy203->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy144;
 }
         break;
-      case 109: /* seltablist ::= stl_prefix nm dbnm as on_using */
+      case 111: /* seltablist ::= stl_prefix nm dbnm as on_using */
 {
-  yymsp[-4].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy131,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561);
+  yymsp[-4].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy203,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269);
 }
         break;
-      case 110: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */
+      case 112: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */
 {
-  yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy561);
-  sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy131, &yymsp[-1].minor.yy0);
+  yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy269);
+  sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-1].minor.yy0);
 }
         break;
-      case 111: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */
+      case 113: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */
 {
-  yymsp[-7].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy131,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561);
-  sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy131, yymsp[-3].minor.yy322);
+  yymsp[-7].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy203,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269);
+  sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy203, yymsp[-3].minor.yy14);
 }
         break;
-      case 112: /* seltablist ::= stl_prefix LP select RP as on_using */
+      case 114: /* seltablist ::= stl_prefix LP select RP as on_using */
 {
-  yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy47,&yymsp[0].minor.yy561);
+  yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy555,&yymsp[0].minor.yy269);
 }
         break;
-      case 113: /* seltablist ::= stl_prefix LP seltablist RP as on_using */
+      case 115: /* seltablist ::= stl_prefix LP seltablist RP as on_using */
 {
-  if( yymsp[-5].minor.yy131==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy561.pOn==0 && yymsp[0].minor.yy561.pUsing==0 ){
-    yymsp[-5].minor.yy131 = yymsp[-3].minor.yy131;
-  }else if( ALWAYS(yymsp[-3].minor.yy131!=0) && yymsp[-3].minor.yy131->nSrc==1 ){
-    yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561);
-    if( yymsp[-5].minor.yy131 ){
-      SrcItem *pNew = &yymsp[-5].minor.yy131->a[yymsp[-5].minor.yy131->nSrc-1];
-      SrcItem *pOld = yymsp[-3].minor.yy131->a;
+  if( yymsp[-5].minor.yy203==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy269.pOn==0 && yymsp[0].minor.yy269.pUsing==0 ){
+    yymsp[-5].minor.yy203 = yymsp[-3].minor.yy203;
+  }else if( ALWAYS(yymsp[-3].minor.yy203!=0) && yymsp[-3].minor.yy203->nSrc==1 ){
+    yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269);
+    if( yymsp[-5].minor.yy203 ){
+      SrcItem *pNew = &yymsp[-5].minor.yy203->a[yymsp[-5].minor.yy203->nSrc-1];
+      SrcItem *pOld = yymsp[-3].minor.yy203->a;
       pNew->zName = pOld->zName;
       pNew->zDatabase = pOld->zDatabase;
       pNew->pSelect = pOld->pSelect;
@@ -174981,153 +176590,153 @@ static YYACTIONTYPE yy_reduce(
       pOld->zName = pOld->zDatabase = 0;
       pOld->pSelect = 0;
     }
-    sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy131);
+    sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy203);
   }else{
     Select *pSubquery;
-    sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy131);
-    pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy131,0,0,0,0,SF_NestedFrom,0);
-    yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy561);
+    sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy203);
+    pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy203,0,0,0,0,SF_NestedFrom,0);
+    yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy269);
   }
 }
         break;
-      case 114: /* dbnm ::= */
-      case 129: /* indexed_opt ::= */ yytestcase(yyruleno==129);
+      case 116: /* dbnm ::= */
+      case 131: /* indexed_opt ::= */ yytestcase(yyruleno==131);
 {yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;}
         break;
-      case 116: /* fullname ::= nm */
+      case 118: /* fullname ::= nm */
 {
-  yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0);
-  if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0);
+  yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0);
+  if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0);
 }
-  yymsp[0].minor.yy131 = yylhsminor.yy131;
+  yymsp[0].minor.yy203 = yylhsminor.yy203;
         break;
-      case 117: /* fullname ::= nm DOT nm */
+      case 119: /* fullname ::= nm DOT nm */
 {
-  yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);
-  if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0);
+  yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);
+  if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0);
 }
-  yymsp[-2].minor.yy131 = yylhsminor.yy131;
+  yymsp[-2].minor.yy203 = yylhsminor.yy203;
         break;
-      case 118: /* xfullname ::= nm */
-{yymsp[0].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/}
+      case 120: /* xfullname ::= nm */
+{yymsp[0].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/}
         break;
-      case 119: /* xfullname ::= nm DOT nm */
-{yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/}
+      case 121: /* xfullname ::= nm DOT nm */
+{yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/}
         break;
-      case 120: /* xfullname ::= nm DOT nm AS nm */
+      case 122: /* xfullname ::= nm DOT nm AS nm */
 {
-  yymsp[-4].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/
-  if( yymsp[-4].minor.yy131 ) yymsp[-4].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
+  yymsp[-4].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/
+  if( yymsp[-4].minor.yy203 ) yymsp[-4].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
 }
         break;
-      case 121: /* xfullname ::= nm AS nm */
+      case 123: /* xfullname ::= nm AS nm */
 {
-  yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/
-  if( yymsp[-2].minor.yy131 ) yymsp[-2].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
+  yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/
+  if( yymsp[-2].minor.yy203 ) yymsp[-2].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
 }
         break;
-      case 122: /* joinop ::= COMMA|JOIN */
-{ yymsp[0].minor.yy394 = JT_INNER; }
+      case 124: /* joinop ::= COMMA|JOIN */
+{ yymsp[0].minor.yy144 = JT_INNER; }
        break;
-      case 123: /* joinop ::= JOIN_KW JOIN */
-{yymsp[-1].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0);  /*X-overwrites-A*/}
+      case 125: /* joinop ::= JOIN_KW JOIN */
+{yymsp[-1].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0);  /*X-overwrites-A*/}
         break;
-      case 124: /* joinop ::= JOIN_KW nm JOIN */
-{yymsp[-2].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/}
+      case 126: /* joinop ::= JOIN_KW nm JOIN */
+{yymsp[-2].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/}
         break;
-      case 125: /* joinop ::= JOIN_KW nm nm JOIN */
-{yymsp[-3].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/}
+      case 127: /* joinop ::= JOIN_KW nm nm JOIN */
+{yymsp[-3].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/}
         break;
-      case 126: /* on_using ::= ON expr */
-{yymsp[-1].minor.yy561.pOn = yymsp[0].minor.yy528; yymsp[-1].minor.yy561.pUsing = 0;}
+      case 128: /* on_using ::= ON expr */
+{yymsp[-1].minor.yy269.pOn = yymsp[0].minor.yy454; yymsp[-1].minor.yy269.pUsing = 0;}
        break;
-      case 127: /* on_using ::= USING LP idlist RP */
-{yymsp[-3].minor.yy561.pOn = 0; yymsp[-3].minor.yy561.pUsing = yymsp[-1].minor.yy254;}
+      case 129: /* on_using ::= USING LP idlist RP */
+{yymsp[-3].minor.yy269.pOn = 0; yymsp[-3].minor.yy269.pUsing = yymsp[-1].minor.yy132;}
        break;
-      case 128: /* on_using ::= */
-{yymsp[1].minor.yy561.pOn = 0; yymsp[1].minor.yy561.pUsing = 0;}
+      case 130: /* on_using ::= */
+{yymsp[1].minor.yy269.pOn = 0; yymsp[1].minor.yy269.pUsing = 0;}
        break;
-      case 130: /* indexed_by ::= INDEXED BY nm */
+      case 132: /* indexed_by ::= INDEXED BY nm */
 {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;}
         break;
-      case 131: /* indexed_by ::= NOT INDEXED */
+      case 133: /* indexed_by ::= NOT INDEXED */
 {yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;}
         break;
-      case 133: /* orderby_opt ::= ORDER BY sortlist */
-      case 143: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==143);
-{yymsp[-2].minor.yy322 = yymsp[0].minor.yy322;}
+      case 135: /* orderby_opt ::= ORDER BY sortlist */
+      case 145: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==145);
+{yymsp[-2].minor.yy14 = yymsp[0].minor.yy14;}
         break;
-      case 134: /* sortlist ::= sortlist COMMA expr sortorder nulls */
+      case 136: /* sortlist ::= sortlist COMMA expr sortorder nulls */
 {
-  yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322,yymsp[-2].minor.yy528);
-  sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394);
+  yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454);
+  sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144);
 }
         break;
-      case 135: /* sortlist ::= expr sortorder nulls */
+      case 137: /* sortlist ::= expr sortorder nulls */
 {
-  yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy528); /*A-overwrites-Y*/
-  sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394);
+  yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy454); /*A-overwrites-Y*/
+  sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144);
 }
         break;
-      case 136: /* sortorder ::= ASC */
-{yymsp[0].minor.yy394 = SQLITE_SO_ASC;}
+      case 138: /* sortorder ::= ASC */
+{yymsp[0].minor.yy144 = SQLITE_SO_ASC;}
         break;
-      case 137: /* sortorder ::= DESC */
-{yymsp[0].minor.yy394 = SQLITE_SO_DESC;}
+      case 139: /* sortorder ::= DESC */
+{yymsp[0].minor.yy144 = SQLITE_SO_DESC;}
         break;
-      case 138: /* sortorder ::= */
-      case 141: /* nulls ::= */ yytestcase(yyruleno==141);
-{yymsp[1].minor.yy394 = SQLITE_SO_UNDEFINED;}
+      case 140: /* sortorder ::= */
+      case 143: /* nulls ::= */ yytestcase(yyruleno==143);
+{yymsp[1].minor.yy144 = SQLITE_SO_UNDEFINED;}
         break;
-      case 139: /* nulls ::= NULLS FIRST */
-{yymsp[-1].minor.yy394 = SQLITE_SO_ASC;}
+      case 141: /* nulls ::= NULLS FIRST */
+{yymsp[-1].minor.yy144 = SQLITE_SO_ASC;}
         break;
-      case 140: /* nulls ::= NULLS LAST */
-{yymsp[-1].minor.yy394 = SQLITE_SO_DESC;}
+      case 142: /* nulls ::= NULLS LAST */
+{yymsp[-1].minor.yy144 = SQLITE_SO_DESC;}
        break;
-      case 144: /* having_opt ::= */
-      case 146: /* limit_opt ::= */ yytestcase(yyruleno==146);
-      case 151: /* where_opt ::= */ yytestcase(yyruleno==151);
-      case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153);
-      case 230: /* case_else ::= */ yytestcase(yyruleno==230);
-      case 231: /* case_operand ::= */ yytestcase(yyruleno==231);
-      case 250: /* vinto ::= */ yytestcase(yyruleno==250);
-{yymsp[1].minor.yy528 = 0;}
+      case 146: /* having_opt ::= */
+      case 148: /* limit_opt ::= */ yytestcase(yyruleno==148);
+      case 153: /* where_opt ::= */ yytestcase(yyruleno==153);
+      case 155: /* where_opt_ret ::= */ yytestcase(yyruleno==155);
+      case 232: /* case_else ::= */ yytestcase(yyruleno==232);
+      case 233: /* case_operand ::= */ yytestcase(yyruleno==233);
+      case 252: /* vinto ::= */ yytestcase(yyruleno==252);
+{yymsp[1].minor.yy454 = 0;}
         break;
-      case 145: /* having_opt ::= HAVING expr */
-      case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152);
-      case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154);
-      case 229: /* case_else ::= ELSE expr */ yytestcase(yyruleno==229);
-      case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249);
-{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;}
+      case 147: /* having_opt ::= HAVING expr */
+      case 154: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==154);
+      case 156: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==156);
+      case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231);
+      case 251: /* vinto ::= INTO expr */ yytestcase(yyruleno==251);
+{yymsp[-1].minor.yy454 = yymsp[0].minor.yy454;}
        break;
-      case 147: /* limit_opt ::= LIMIT expr */
-{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,0);}
+      case 149: /* limit_opt ::= LIMIT expr */
+{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,0);}
         break;
-      case 148: /* limit_opt ::= LIMIT expr OFFSET expr */
-{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);}
+      case 150: /* limit_opt ::= LIMIT expr OFFSET expr */
+{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);}
         break;
-      case 149: /* limit_opt ::= LIMIT expr COMMA expr */
-{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,yymsp[-2].minor.yy528);}
+      case 151: /* limit_opt ::= LIMIT expr COMMA expr */
+{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,yymsp[-2].minor.yy454);}
         break;
-      case 150: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */
+      case 152: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */
 {
-  sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy131, &yymsp[-1].minor.yy0);
-  sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy131,yymsp[0].minor.yy528,0,0);
+  sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy203, &yymsp[-1].minor.yy0);
+  sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy203,yymsp[0].minor.yy454,0,0);
 }
         break;
-      case 155: /* where_opt_ret ::= RETURNING selcollist */
-{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-1].minor.yy528 = 0;}
+      case 157: /* where_opt_ret ::= RETURNING selcollist */
+{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-1].minor.yy454 = 0;}
         break;
-      case 156: /* where_opt_ret ::= WHERE expr RETURNING selcollist */
-{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-3].minor.yy528 = yymsp[-2].minor.yy528;}
+      case 158: /* where_opt_ret ::= WHERE expr RETURNING selcollist */
+{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-3].minor.yy454 = yymsp[-2].minor.yy454;}
         break;
-      case 157: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */
+      case 159: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */
 {
-  sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy131, &yymsp[-4].minor.yy0);
-  sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy322,"set list");
-  if( yymsp[-1].minor.yy131 ){
-    SrcList *pFromClause = yymsp[-1].minor.yy131;
+  sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-4].minor.yy0);
+  sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy14,"set list");
+  if( yymsp[-1].minor.yy203 ){
+    SrcList *pFromClause = yymsp[-1].minor.yy203;
     if( pFromClause->nSrc>1 ){
       Select *pSubquery;
       Token as;
@@ -175136,92 +176745,92 @@ static YYACTIONTYPE yy_reduce(
       as.z = 0;
       pFromClause = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0);
     }
-    yymsp[-5].minor.yy131 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy131, pFromClause);
+    yymsp[-5].minor.yy203 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy203, pFromClause);
   }
-  sqlite3Update(pParse,yymsp[-5].minor.yy131,yymsp[-2].minor.yy322,yymsp[0].minor.yy528,yymsp[-6].minor.yy394,0,0,0);
+  sqlite3Update(pParse,yymsp[-5].minor.yy203,yymsp[-2].minor.yy14,yymsp[0].minor.yy454,yymsp[-6].minor.yy144,0,0,0);
 }
         break;
-      case 158: /* setlist ::= setlist COMMA nm EQ expr */
+      case 160: /* setlist ::= setlist COMMA nm EQ expr */
 {
-  yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[0].minor.yy528);
-  sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, 1);
+  yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy454);
+  sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, 1);
 }
         break;
-      case 159: /* setlist ::= setlist COMMA LP idlist RP EQ expr */
+      case 161: /* setlist ::= setlist COMMA LP idlist RP EQ expr */
 {
-  yymsp[-6].minor.yy322 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy322, yymsp[-3].minor.yy254, yymsp[0].minor.yy528);
+  yymsp[-6].minor.yy14 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy14, yymsp[-3].minor.yy132, yymsp[0].minor.yy454);
 }
         break;
-      case 160: /* setlist ::= nm EQ expr */
+      case 162: /* setlist ::= nm EQ expr */
 {
-  yylhsminor.yy322 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy528);
-  sqlite3ExprListSetName(pParse, yylhsminor.yy322, &yymsp[-2].minor.yy0, 1);
+  yylhsminor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy454);
+  sqlite3ExprListSetName(pParse, yylhsminor.yy14, &yymsp[-2].minor.yy0, 1);
 }
-  yymsp[-2].minor.yy322 = yylhsminor.yy322;
+  yymsp[-2].minor.yy14 = yylhsminor.yy14;
         break;
-      case 161: /* setlist ::= LP idlist RP EQ expr */
+      case 163: /* setlist ::= LP idlist RP EQ expr */
 {
-  yymsp[-4].minor.yy322 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy254, yymsp[0].minor.yy528);
+  yymsp[-4].minor.yy14 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy132, yymsp[0].minor.yy454);
 }
         break;
-      case 162: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
+      case 164: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
 {
-  sqlite3Insert(pParse, yymsp[-3].minor.yy131, yymsp[-1].minor.yy47, yymsp[-2].minor.yy254, yymsp[-5].minor.yy394, yymsp[0].minor.yy444);
+  sqlite3Insert(pParse, yymsp[-3].minor.yy203, yymsp[-1].minor.yy555, yymsp[-2].minor.yy132, yymsp[-5].minor.yy144, yymsp[0].minor.yy122);
 }
         break;
-      case 163: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */
+      case 165: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */
 {
-  sqlite3Insert(pParse, yymsp[-4].minor.yy131, 0, yymsp[-3].minor.yy254, yymsp[-6].minor.yy394, 0);
+  sqlite3Insert(pParse, yymsp[-4].minor.yy203, 0, yymsp[-3].minor.yy132, yymsp[-6].minor.yy144, 0);
 }
         break;
-      case 164: /* upsert ::= */
-{ yymsp[1].minor.yy444 = 0; }
+      case 166: /* upsert ::= */
+{ yymsp[1].minor.yy122 = 0; }
         break;
-      case 165: /* upsert ::= RETURNING selcollist */
-{ yymsp[-1].minor.yy444 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy322); }
+      case 167: /* upsert ::= RETURNING selcollist */
+{ yymsp[-1].minor.yy122 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy14); }
         break;
-      case 166: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */
-{ yymsp[-11].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy322,yymsp[-6].minor.yy528,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,yymsp[0].minor.yy444);}
+      case 168: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */
+{ yymsp[-11].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy14,yymsp[-6].minor.yy454,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,yymsp[0].minor.yy122);}
        break;
-      case 167: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */
-{ yymsp[-8].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy322,yymsp[-3].minor.yy528,0,0,yymsp[0].minor.yy444); }
+      case 169: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */
+{ yymsp[-8].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy14,yymsp[-3].minor.yy454,0,0,yymsp[0].minor.yy122); }
        break;
-      case 168: /* upsert ::= ON CONFLICT DO NOTHING returning */
-{ yymsp[-4].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); }
+      case 170: /* upsert ::= ON CONFLICT DO NOTHING returning */
+{ yymsp[-4].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); }
        break;
-      case 169: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */
-{ yymsp[-7].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,0);}
+      case 171: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */
+{ yymsp[-7].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,0);}
        break;
-      case 170: /* returning ::= RETURNING selcollist */
-{sqlite3AddReturning(pParse,yymsp[0].minor.yy322);}
+      case 172: /* returning ::= RETURNING selcollist */
+{sqlite3AddReturning(pParse,yymsp[0].minor.yy14);}
         break;
-      case 173: /* idlist_opt ::= */
-{yymsp[1].minor.yy254 = 0;}
+      case 175: /* idlist_opt ::= */
+{yymsp[1].minor.yy132 = 0;}
         break;
-      case 174: /* idlist_opt ::= LP idlist RP */
-{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;}
+      case 176: /* idlist_opt ::= LP idlist RP */
+{yymsp[-2].minor.yy132 = yymsp[-1].minor.yy132;}
         break;
-      case 175: /* idlist ::= idlist COMMA nm */
-{yymsp[-2].minor.yy254 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy254,&yymsp[0].minor.yy0);}
+      case 177: /* idlist ::= idlist COMMA nm */
+{yymsp[-2].minor.yy132 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy132,&yymsp[0].minor.yy0);}
         break;
-      case 176: /* idlist ::= nm */
-{yymsp[0].minor.yy254 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/}
+      case 178: /* idlist ::= nm */
+{yymsp[0].minor.yy132 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/}
         break;
-      case 177: /* expr ::= LP expr RP */
-{yymsp[-2].minor.yy528 = yymsp[-1].minor.yy528;}
+      case 179: /* expr ::= LP expr RP */
+{yymsp[-2].minor.yy454 = yymsp[-1].minor.yy454;}
         break;
-      case 178: /* expr ::= ID|INDEXED|JOIN_KW */
-{yymsp[0].minor.yy528=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/}
+      case 180: /* expr ::= ID|INDEXED|JOIN_KW */
+{yymsp[0].minor.yy454=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/}
         break;
-      case 179: /* expr ::= nm DOT nm */
+      case 181: /* expr ::= nm DOT nm */
 {
   Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0);
   Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0);
-  yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2);
+  yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2);
 }
-  yymsp[-2].minor.yy528 = yylhsminor.yy528;
+  yymsp[-2].minor.yy454 = yylhsminor.yy454;
         break;
-      case 180: /* expr ::= nm DOT nm DOT nm */
+      case 182: /* expr ::= nm DOT nm DOT nm */
 {
   Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-4].minor.yy0);
   Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0);
@@ -175230,27 +176839,27 @@ static YYACTIONTYPE yy_reduce(
   if( IN_RENAME_OBJECT ){
     sqlite3RenameTokenRemap(pParse, 0, temp1);
   }
-  yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4);
+  yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4);
 }
-  yymsp[-4].minor.yy528 = yylhsminor.yy528;
+  yymsp[-4].minor.yy454 = yylhsminor.yy454;
         break;
-      case 181: /* term ::= NULL|FLOAT|BLOB */
-      case 182: /* term ::= STRING */ yytestcase(yyruleno==182);
-{yymsp[0].minor.yy528=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/}
+      case 183: /* term ::= NULL|FLOAT|BLOB */
+      case 184: /* term ::= STRING */ yytestcase(yyruleno==184);
+{yymsp[0].minor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/}
         break;
-      case 183: /* term ::= INTEGER */
+      case 185: /* term ::= INTEGER */
 {
-  yylhsminor.yy528 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1);
-  if( yylhsminor.yy528 ) yylhsminor.yy528->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail);
+  yylhsminor.yy454 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1);
+  if( yylhsminor.yy454 ) yylhsminor.yy454->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail);
 }
-  yymsp[0].minor.yy528 = yylhsminor.yy528;
+  yymsp[0].minor.yy454 = yylhsminor.yy454;
         break;
-      case 184: /* expr ::= VARIABLE */
+      case 186: /* expr ::= VARIABLE */
 {
   if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){
     u32 n = yymsp[0].minor.yy0.n;
-    yymsp[0].minor.yy528 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0);
-    sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy528, n);
+    yymsp[0].minor.yy454 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0);
+    sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy454, n);
   }else{
     /* When doing a nested parse, one can include terms in an expression
     ** that look like this: #1 #2 ...  These terms refer to registers
@@ -175259,194 +176868,203 @@ static YYACTIONTYPE yy_reduce(
     assert( t.n>=2 );
     if( pParse->nested==0 ){
       sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t);
-      yymsp[0].minor.yy528 = 0;
+      yymsp[0].minor.yy454 = 0;
    }else{
-      yymsp[0].minor.yy528 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0);
-      if( yymsp[0].minor.yy528 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy528->iTable);
+      yymsp[0].minor.yy454 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0);
+      if( yymsp[0].minor.yy454 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy454->iTable);
     }
   }
 }
         break;
-      case 185: /* expr ::= expr COLLATE ID|STRING */
+      case 187: /* expr ::= expr COLLATE ID|STRING */
 {
-  yymsp[-2].minor.yy528 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy528, &yymsp[0].minor.yy0, 1);
+  yymsp[-2].minor.yy454 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy454, &yymsp[0].minor.yy0, 1);
 }
         break;
-      case 186: /* expr ::= CAST LP expr AS typetoken RP */
+      case 188: /* expr ::= CAST LP expr AS typetoken RP */
 {
-  yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1);
-  sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy528, yymsp[-3].minor.yy528, 0);
+  yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1);
+  sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy454, yymsp[-3].minor.yy454, 0);
 }
        break;
-      case 187: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
+      case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
 {
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy394);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy144);
 }
-  yymsp[-4].minor.yy528 = yylhsminor.yy528;
+  yymsp[-4].minor.yy454 = yylhsminor.yy454;
         break;
-      case 188: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+      case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
 {
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy322, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy394);
-  sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-1].minor.yy322);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy14, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy144);
+  sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-1].minor.yy14);
 }
-  yymsp[-7].minor.yy528 = yylhsminor.yy528;
+  yymsp[-7].minor.yy454 = yylhsminor.yy454;
        break;
-      case 189: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+      case 191: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
 {
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0);
 }
-  yymsp[-3].minor.yy528 = yylhsminor.yy528;
+  yymsp[-3].minor.yy454 = yylhsminor.yy454;
        break;
-      case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+      case 192: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
 {
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394);
-  sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy14, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy144);
+  sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211);
 }
-  yymsp[-5].minor.yy528 = yylhsminor.yy528;
+  yymsp[-5].minor.yy454 = yylhsminor.yy454;
        break;
-      case 191: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+      case 193: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
 {
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy322, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy394);
-  sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
-  sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-2].minor.yy322);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy14, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy144);
+  sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211);
+  sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-2].minor.yy14);
 }
-  yymsp[-8].minor.yy528 = yylhsminor.yy528;
+  yymsp[-8].minor.yy454 = yylhsminor.yy454;
        break;
-      case 192: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+      case 194: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
 {
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0);
-  sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0);
+  sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211);
 }
-  yymsp[-4].minor.yy528 = yylhsminor.yy528;
+  yymsp[-4].minor.yy454 = yylhsminor.yy454;
        break;
-      case 193: /* term ::= CTIME_KW */
+      case 195: /* term ::= CTIME_KW */
 {
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0);
 }
-  yymsp[0].minor.yy528 = yylhsminor.yy528;
+  yymsp[0].minor.yy454 = yylhsminor.yy454;
        break;
-      case 194: /* expr ::= LP nexprlist COMMA expr RP */
+      case 196: /* expr ::= LP nexprlist COMMA expr RP */
 {
-  ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528);
-  yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
-  if( yymsp[-4].minor.yy528 ){
-    yymsp[-4].minor.yy528->x.pList = pList;
+  ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454);
+  yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
+  if( yymsp[-4].minor.yy454 ){
+    yymsp[-4].minor.yy454->x.pList = pList;
     if( ALWAYS(pList->nExpr) ){
-      yymsp[-4].minor.yy528->flags |= pList->a[0].pExpr->flags & EP_Propagate;
+      yymsp[-4].minor.yy454->flags |= pList->a[0].pExpr->flags & EP_Propagate;
     }
   }else{
    sqlite3ExprListDelete(pParse->db, pList);
  }
 }
        break;
-      case 195: /* expr ::= expr AND expr */
-{yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);}
+      case 197: /* expr ::= expr AND expr */
+{yymsp[-2].minor.yy454=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);}
        break;
-      case 196: /* expr ::= expr OR expr */
-      case 197: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==197);
-      case 198: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==198);
-      case 199: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==199);
-      case 200: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==200);
-      case 201: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==201);
-      case 202: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==202);
-{yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);}
+      case 198: /* expr ::= expr OR expr */
+      case 199: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==199);
+      case 200: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==200);
+      case 201: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==201);
+      case 202: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==202);
+      case 203: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==203);
+      case 204: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==204);
+{yymsp[-2].minor.yy454=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);}
        break;
-      case 203: /* likeop ::= NOT LIKE_KW|MATCH */
+      case 205: /* likeop ::= NOT LIKE_KW|MATCH */
 {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/}
        break;
-      case 204: /* expr ::= expr likeop expr */
+      case 206: /* expr ::= expr likeop expr */
 {
   ExprList *pList;
   int bNot = yymsp[-1].minor.yy0.n & 0x80000000;
   yymsp[-1].minor.yy0.n &= 0x7fffffff;
-  pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy528);
-  pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy528);
-  yymsp[-2].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0);
-  if( bNot ) yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy528, 0);
-  if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc;
+  pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy454);
+  pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy454);
+  yymsp[-2].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0);
+  if( bNot ) yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy454, 0);
+  if( yymsp[-2].minor.yy454 ) yymsp[-2].minor.yy454->flags |= EP_InfixFunc;
 }
        break;
-      case 205: /* expr ::= expr likeop expr ESCAPE expr */
+      case 207: /* expr ::= expr likeop expr ESCAPE expr */
 {
   ExprList *pList;
   int bNot = yymsp[-3].minor.yy0.n & 0x80000000;
   yymsp[-3].minor.yy0.n &= 0x7fffffff;
-  pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
-  pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy528);
-  pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528);
-  yymsp[-4].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0);
-  if( bNot ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
-  if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc;
+  pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454);
+  pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy454);
+  pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454);
+  yymsp[-4].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0);
+  if( bNot ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0);
+  if( yymsp[-4].minor.yy454 ) yymsp[-4].minor.yy454->flags |= EP_InfixFunc;
 }
        break;
-      case 206: /* expr ::= expr ISNULL|NOTNULL */
-{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);}
+      case 208: /* expr ::= expr ISNULL|NOTNULL */
+{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy454,0);}
        break;
-      case 207: /* expr ::= expr NOT NULL */
-{yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);}
+      case 209: /* expr ::= expr NOT NULL */
+{yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy454,0);}
        break;
-      case 208: /* expr ::= expr IS expr */
+      case 210: /* expr ::= expr IS expr */
 {
-  yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);
-  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL);
+  yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);
+  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-2].minor.yy454, TK_ISNULL);
 }
        break;
-      case 209: /* expr ::= expr IS NOT expr */
+      case 211: /* expr ::= expr IS NOT expr */
 {
-  yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528);
-  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL);
+  yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy454,yymsp[0].minor.yy454);
+  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-3].minor.yy454, TK_NOTNULL);
 }
        break;
-      case 210: /* expr ::= expr IS NOT DISTINCT FROM expr */
+      case 212: /* expr ::= expr IS NOT DISTINCT FROM expr */
 {
-  yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528);
-  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL);
+  yymsp[-5].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy454,yymsp[0].minor.yy454);
+  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-5].minor.yy454, TK_ISNULL);
 }
        break;
-      case 211: /* expr ::= expr IS DISTINCT FROM expr */
+      case 213: /* expr ::= expr IS DISTINCT FROM expr */
 {
-  yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528);
-  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL);
+  yymsp[-4].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy454,yymsp[0].minor.yy454);
+  binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-4].minor.yy454, TK_NOTNULL);
 }
        break;
-      case 212: /* expr ::= NOT expr */
-      case 213: /* expr ::= BITNOT expr */ yytestcase(yyruleno==213);
-{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/}
+      case 214: /* expr ::= NOT expr */
+      case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215);
+{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy454, 0);/*A-overwrites-B*/}
        break;
-      case 214: /* expr ::= PLUS|MINUS expr */
+      case 216: /* expr ::= PLUS|MINUS expr */
 {
-  yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0);
-  /*A-overwrites-B*/
+  Expr *p = yymsp[0].minor.yy454;
+  u8 op = yymsp[-1].major + (TK_UPLUS-TK_PLUS);
+  assert( TK_UPLUS>TK_PLUS );
+  assert( TK_UMINUS == TK_MINUS + (TK_UPLUS - TK_PLUS) );
+  if( p && p->op==TK_UPLUS ){
+    p->op = op;
+    yymsp[-1].minor.yy454 = p;
+  }else{
+    yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, op, p, 0);
+    /*A-overwrites-B*/
+  }
 }
        break;
-      case 215: /* expr ::= expr PTR expr */
+      case 217: /* expr ::= expr PTR expr */
 {
-  ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528);
-  pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528);
-  yylhsminor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0);
+  ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy454);
+  pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy454);
+  yylhsminor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0);
 }
-  yymsp[-2].minor.yy528 = yylhsminor.yy528;
+  yymsp[-2].minor.yy454 = yylhsminor.yy454;
        break;
-      case 216: /* between_op ::= BETWEEN */
-      case 219: /* in_op ::= IN */ yytestcase(yyruleno==219);
-{yymsp[0].minor.yy394 = 0;}
+      case 218: /* between_op ::= BETWEEN */
+      case 221: /* in_op ::= IN */ yytestcase(yyruleno==221);
+{yymsp[0].minor.yy144 = 0;}
        break;
-      case 218: /* expr ::= expr between_op expr AND expr */
+      case 220: /* expr ::= expr between_op expr AND expr */
 {
-  ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
-  pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528);
-  yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy528, 0);
-  if( yymsp[-4].minor.yy528 ){
-    yymsp[-4].minor.yy528->x.pList = pList;
+  ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454);
+  pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454);
+  yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy454, 0);
+  if( yymsp[-4].minor.yy454 ){
+    yymsp[-4].minor.yy454->x.pList = pList;
   }else{
     sqlite3ExprListDelete(pParse->db, pList);
   }
-  if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
+  if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0);
 }
        break;
-      case 221: /* expr ::= expr in_op LP exprlist RP */
+      case 223: /* expr ::= expr in_op LP exprlist RP */
 {
-  if( yymsp[-1].minor.yy322==0 ){
+  if( yymsp[-1].minor.yy14==0 ){
     /* Expressions of the form
     **
     **      expr1 IN ()
@@ -175455,208 +177073,208 @@ static YYACTIONTYPE yy_reduce(
     ** simplify to constants 0 (false) and 1 (true), respectively,
     ** regardless of the value of expr1.
    */
-    sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy528);
-    yymsp[-4].minor.yy528 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy394 ? "true" : "false");
-    if( yymsp[-4].minor.yy528 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy528);
-  }else{
-    Expr *pRHS = yymsp[-1].minor.yy322->a[0].pExpr;
-    if( yymsp[-1].minor.yy322->nExpr==1 && sqlite3ExprIsConstant(pRHS) && yymsp[-4].minor.yy528->op!=TK_VECTOR ){
-      yymsp[-1].minor.yy322->a[0].pExpr = 0;
-      sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322);
+    sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy454);
+    yymsp[-4].minor.yy454 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy144 ? "true" : "false");
+    if( yymsp[-4].minor.yy454 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy454);
+  }else{
+    Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr;
+    if( yymsp[-1].minor.yy14->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy454->op!=TK_VECTOR ){
+      yymsp[-1].minor.yy14->a[0].pExpr = 0;
+      sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14);
       pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0);
-      yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy528, pRHS);
-    }else if( yymsp[-1].minor.yy322->nExpr==1 && pRHS->op==TK_SELECT ){
-      yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
-      sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pRHS->x.pSelect);
+      yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy454, pRHS);
+    }else if( yymsp[-1].minor.yy14->nExpr==1 && pRHS->op==TK_SELECT ){
+      yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0);
+      sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pRHS->x.pSelect);
       pRHS->x.pSelect = 0;
-      sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322);
-    }else{
-      yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
-      if( yymsp[-4].minor.yy528==0 ){
-        sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322);
-      }else if( yymsp[-4].minor.yy528->pLeft->op==TK_VECTOR ){
-        int nExpr = yymsp[-4].minor.yy528->pLeft->x.pList->nExpr;
-        Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy322);
+      sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14);
+    }else{
+      yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0);
+      if( yymsp[-4].minor.yy454==0 ){
+        sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14);
+      }else if( yymsp[-4].minor.yy454->pLeft->op==TK_VECTOR ){
+        int nExpr = yymsp[-4].minor.yy454->pLeft->x.pList->nExpr;
+        Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy14);
        if( pSelectRHS ){
          parserDoubleLinkSelect(pParse, pSelectRHS);
-          sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelectRHS);
+          sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelectRHS);
        }
      }else{
-        yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy322;
-        sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528);
+        yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy14;
+        sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454);
      }
    }
-    if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
+    if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0);
  }
 }
        break;
-      case 222: /* expr ::= LP select RP */
+      case 224: /* expr ::= LP select RP */
 {
-  yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
-  sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47);
+  yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
+  sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy454, yymsp[-1].minor.yy555);
 }
        break;
-      case 223: /* expr ::= expr in_op LP select RP */
+      case 225: /* expr ::= expr in_op LP select RP */
 {
-  yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
-  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47);
-  if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
+  yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0);
+  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, yymsp[-1].minor.yy555);
+  if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0);
 }
        break;
-      case 224: /* expr ::= expr in_op nm dbnm paren_exprlist */
+      case 226: /* expr ::= expr in_op nm dbnm paren_exprlist */
 {
   SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
   Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);
-  if( yymsp[0].minor.yy322 )  sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy322);
-  yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
-  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelect);
-  if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
+  if( yymsp[0].minor.yy14 )  sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy14);
+  yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0);
+  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelect);
+  if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0);
 }
        break;
-      case 225: /* expr ::= EXISTS LP select RP */
+      case 227: /* expr ::= EXISTS LP select RP */
 {
   Expr *p;
-  p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
-  sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47);
+  p = yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
+  sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy555);
 }
        break;
-      case 226: /* expr ::= CASE case_operand case_exprlist case_else END */
+      case 228: /* expr ::= CASE case_operand case_exprlist case_else END */
 {
-  yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0);
-  if( yymsp[-4].minor.yy528 ){
-    yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy528 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528) : yymsp[-2].minor.yy322;
-    sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528);
+  yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy454, 0);
+  if( yymsp[-4].minor.yy454 ){
+    yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy454 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454) : yymsp[-2].minor.yy14;
+    sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454);
   }else{
-    sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy322);
-    sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528);
+    sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14);
+    sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454);
   }
 }
        break;
-      case 227: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+      case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
 {
-  yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528);
-  yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528);
+  yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy454);
+  yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[0].minor.yy454);
 }
        break;
-      case 228: /* case_exprlist ::= WHEN expr THEN expr */
+      case 230: /* case_exprlist ::= WHEN expr THEN expr */
 {
-  yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
-  yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528);
+  yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454);
+  yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, yymsp[0].minor.yy454);
 }
        break;
-      case 233: /* nexprlist ::= nexprlist COMMA expr */
-{yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);}
+      case 235: /* nexprlist ::= nexprlist COMMA expr */
+{yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy454);}
        break;
-      case 234: /* nexprlist ::= expr */
-{yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/}
+      case 236: /* nexprlist ::= expr */
+{yymsp[0].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy454); /*A-overwrites-Y*/}
        break;
-      case 236: /* paren_exprlist ::= LP exprlist RP */
-      case 241: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==241);
-{yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;}
+      case 238: /* paren_exprlist ::= LP exprlist RP */
+      case 243: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==243);
+{yymsp[-2].minor.yy14 = yymsp[-1].minor.yy14;}
        break;
-      case 237: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+      case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
 {
   sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
-                     sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394,
-                      &yymsp[-11].minor.yy0, yymsp[0].minor.yy528, SQLITE_SO_ASC, yymsp[-8].minor.yy394, SQLITE_IDXTYPE_APPDEF);
+                     sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy144,
+                      &yymsp[-11].minor.yy0, yymsp[0].minor.yy454, SQLITE_SO_ASC, yymsp[-8].minor.yy144, SQLITE_IDXTYPE_APPDEF);
   if( IN_RENAME_OBJECT && pParse->pNewIndex ){
     sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0);
   }
 }
        break;
-      case 238: /* uniqueflag ::= UNIQUE */
-      case 280: /* raisetype ::= ABORT */ yytestcase(yyruleno==280);
-{yymsp[0].minor.yy394 = OE_Abort;}
+      case 240: /* uniqueflag ::= UNIQUE */
+      case 282: /* raisetype ::= ABORT */ yytestcase(yyruleno==282);
+{yymsp[0].minor.yy144 = OE_Abort;}
        break;
-      case 239: /* uniqueflag ::= */
-{yymsp[1].minor.yy394 = OE_None;}
+      case 241: /* uniqueflag ::= */
+{yymsp[1].minor.yy144 = OE_None;}
        break;
-      case 242: /* eidlist ::= eidlist COMMA nm collate sortorder */
+      case 244: /* eidlist ::= eidlist COMMA nm collate sortorder */
 {
-  yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394);
+  yymsp[-4].minor.yy14 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144);
 }
        break;
-      case 243: /* eidlist ::= nm collate sortorder */
+      case 245: /* eidlist ::= nm collate sortorder */
 {
-  yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/
+  yymsp[-2].minor.yy14 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); /*A-overwrites-Y*/
 }
        break;
-      case 246: /* cmd ::= DROP INDEX ifexists fullname */
-{sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);}
+      case 248: /* cmd ::= DROP INDEX ifexists fullname */
+{sqlite3DropIndex(pParse, yymsp[0].minor.yy203, yymsp[-1].minor.yy144);}
        break;
-      case 247: /* cmd ::= VACUUM vinto */
-{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);}
+      case 249: /* cmd ::= VACUUM vinto */
+{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy454);}
        break;
-      case 248: /* cmd ::= VACUUM nm vinto */
-{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);}
+      case 250: /* cmd ::= VACUUM nm vinto */
+{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy454);}
        break;
-      case 251: /* cmd ::= PRAGMA nm dbnm */
+      case 253: /* cmd ::= PRAGMA nm dbnm */
 {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
        break;
-      case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+      case 254: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
 {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
        break;
-      case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+      case 255: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
 {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
        break;
-      case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+      case 256: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
 {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
        break;
-      case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+      case 257: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
 {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
        break;
-      case 258: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+      case 260: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
 {
   Token all;
   all.z = yymsp[-3].minor.yy0.z;
   all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n;
-  sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all);
+  sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy427, &all);
 }
        break;
-      case 259: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+      case 261: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
 {
-  sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394);
+  sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy144, yymsp[-4].minor.yy286.a, yymsp[-4].minor.yy286.b, yymsp[-2].minor.yy203, yymsp[0].minor.yy454, yymsp[-10].minor.yy144, yymsp[-8].minor.yy144);
   yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
 }
        break;
-      case 260: /* trigger_time ::= BEFORE|AFTER */
-{ yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ }
+      case 262: /* trigger_time ::= BEFORE|AFTER */
+{ yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/ }
        break;
-      case 261: /* trigger_time ::= INSTEAD OF */
-{ yymsp[-1].minor.yy394 = TK_INSTEAD;}
+      case 263: /* trigger_time ::= INSTEAD OF */
+{ yymsp[-1].minor.yy144 = TK_INSTEAD;}
        break;
-      case 262: /* trigger_time ::= */
-{ yymsp[1].minor.yy394 = TK_BEFORE; }
+      case 264: /* trigger_time ::= */
+{ yymsp[1].minor.yy144 = TK_BEFORE; }
        break;
-      case 263: /* trigger_event ::= DELETE|INSERT */
-      case 264: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==264);
-{yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;}
+      case 265: /* trigger_event ::= DELETE|INSERT */
+      case 266: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==266);
+{yymsp[0].minor.yy286.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy286.b = 0;}
        break;
-      case 265: /* trigger_event ::= UPDATE OF idlist */
-{yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;}
+      case 267: /* trigger_event ::= UPDATE OF idlist */
+{yymsp[-2].minor.yy286.a = TK_UPDATE; yymsp[-2].minor.yy286.b = yymsp[0].minor.yy132;}
        break;
-      case 266: /* when_clause ::= */
-      case 285: /* key_opt ::= */ yytestcase(yyruleno==285);
-{ yymsp[1].minor.yy528 = 0; }
+      case 268: /* when_clause ::= */
+      case 287: /* key_opt ::= */ yytestcase(yyruleno==287);
+{ yymsp[1].minor.yy454 = 0; }
        break;
-      case 267: /* when_clause ::= WHEN expr */
-      case 286: /* key_opt ::= KEY expr */ yytestcase(yyruleno==286);
-{ yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; }
+      case 269: /* when_clause ::= WHEN expr */
+      case 288: /* key_opt ::= KEY expr */ yytestcase(yyruleno==288);
+{ yymsp[-1].minor.yy454 = yymsp[0].minor.yy454; }
        break;
-      case 268: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+      case 270: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
 {
-  assert( yymsp[-2].minor.yy33!=0 );
-  yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33;
-  yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33;
+  assert( yymsp[-2].minor.yy427!=0 );
+  yymsp[-2].minor.yy427->pLast->pNext = yymsp[-1].minor.yy427;
+  yymsp[-2].minor.yy427->pLast = yymsp[-1].minor.yy427;
 }
        break;
-      case 269: /* trigger_cmd_list ::= trigger_cmd SEMI */
+      case 271: /* trigger_cmd_list ::= trigger_cmd SEMI */
 {
-  assert( yymsp[-1].minor.yy33!=0 );
-  yymsp[-1].minor.yy33->pLast = yymsp[-1].minor.yy33;
+  assert( yymsp[-1].minor.yy427!=0 );
+  yymsp[-1].minor.yy427->pLast = yymsp[-1].minor.yy427;
 }
        break;
-      case 270: /* trnm ::= nm DOT nm */
+      case 272: /* trnm ::= nm DOT nm */
 {
   yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
   sqlite3ErrorMsg(pParse,
@@ -175664,367 +177282,377 @@ static YYACTIONTYPE yy_reduce(
         "statements within triggers");
 }
        break;
-      case 271: /* tridxby ::= INDEXED BY nm */
+      case 273: /* tridxby ::= INDEXED BY nm */
 {
   sqlite3ErrorMsg(pParse,
         "the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
         "within triggers");
 }
        break;
-      case 272: /* tridxby ::= NOT INDEXED */
+      case 274: /* tridxby ::= NOT INDEXED */
 {
   sqlite3ErrorMsg(pParse,
         "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
         "within triggers");
 }
        break;
-      case 273: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
-{yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);}
-  yymsp[-8].minor.yy33 = yylhsminor.yy33;
+      case 275: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+{yylhsminor.yy427 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy203, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454, yymsp[-7].minor.yy144, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy168);}
+  yymsp[-8].minor.yy427 = yylhsminor.yy427;
        break;
-      case 274: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+      case 276: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
 {
-   yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/
+   yylhsminor.yy427 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy132,yymsp[-2].minor.yy555,yymsp[-6].minor.yy144,yymsp[-1].minor.yy122,yymsp[-7].minor.yy168,yymsp[0].minor.yy168);/*yylhsminor.yy427-overwrites-yymsp[-6].minor.yy144*/
 }
-  yymsp[-7].minor.yy33 = yylhsminor.yy33;
+  yymsp[-7].minor.yy427 = yylhsminor.yy427;
        break;
-      case 275: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
-{yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);}
-  yymsp[-5].minor.yy33 = yylhsminor.yy33;
+      case 277: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+{yylhsminor.yy427 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy454, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy168);}
+  yymsp[-5].minor.yy427 = yylhsminor.yy427;
        break;
-      case 276: /* trigger_cmd ::= scanpt select scanpt */
-{yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/}
-  yymsp[-2].minor.yy33 = yylhsminor.yy33;
+      case 278: /* trigger_cmd ::= scanpt select scanpt */
+{yylhsminor.yy427 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy555, yymsp[-2].minor.yy168, yymsp[0].minor.yy168); /*yylhsminor.yy427-overwrites-yymsp[-1].minor.yy555*/}
+  yymsp[-2].minor.yy427 = yylhsminor.yy427;
        break;
-      case 277: /* expr ::= RAISE LP IGNORE RP */
+      case 279: /* expr ::= RAISE LP IGNORE RP */
 {
-  yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
-  if( yymsp[-3].minor.yy528 ){
-    yymsp[-3].minor.yy528->affExpr = OE_Ignore;
+  yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
+  if( yymsp[-3].minor.yy454 ){
+    yymsp[-3].minor.yy454->affExpr = OE_Ignore;
   }
 }
        break;
-      case 278: /* expr ::= RAISE LP raisetype COMMA nm RP */
+      case 280: /* expr ::= RAISE LP raisetype COMMA nm RP */
 {
-  yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
-  if( yymsp[-5].minor.yy528 ) {
-    yymsp[-5].minor.yy528->affExpr = (char)yymsp[-3].minor.yy394;
+  yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
+  if( yymsp[-5].minor.yy454 ) {
+
yymsp[-5].minor.yy454->affExpr = (char)yymsp[-3].minor.yy144; } } break; - case 279: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy394 = OE_Rollback;} + case 281: /* raisetype ::= ROLLBACK */ +{yymsp[0].minor.yy144 = OE_Rollback;} break; - case 281: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy394 = OE_Fail;} + case 283: /* raisetype ::= FAIL */ +{yymsp[0].minor.yy144 = OE_Fail;} break; - case 282: /* cmd ::= DROP TRIGGER ifexists fullname */ + case 284: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy203,yymsp[-1].minor.yy144); } break; - case 283: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + case 285: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528); + sqlite3Attach(pParse, yymsp[-3].minor.yy454, yymsp[-1].minor.yy454, yymsp[0].minor.yy454); } break; - case 284: /* cmd ::= DETACH database_kw_opt expr */ + case 286: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy528); + sqlite3Detach(pParse, yymsp[0].minor.yy454); } break; - case 287: /* cmd ::= REINDEX */ + case 289: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; - case 288: /* cmd ::= REINDEX nm dbnm */ + case 290: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 289: /* cmd ::= ANALYZE */ + case 291: /* cmd ::= ANALYZE */ {sqlite3Analyze(pParse, 0, 0);} break; - case 290: /* cmd ::= ANALYZE nm dbnm */ + case 292: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 291: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ + case 293: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy203,&yymsp[0].minor.yy0); } break; - case 292: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + case 294: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ { yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); } break; - case 293: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + case 295: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { - sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0); + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy203, &yymsp[0].minor.yy0); } break; - case 294: /* add_column_fullname ::= fullname */ + case 296: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy203); } break; - case 295: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + case 297: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy203, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 296: /* cmd ::= create_vtab */ + case 298: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; - case 297: /* cmd ::= create_vtab LP vtabarglist RP */ + case 299: /* cmd ::= create_vtab LP vtabarglist RP */ 
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; - case 298: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + case 300: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy144); } break; - case 299: /* vtabarg ::= */ + case 301: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; - case 300: /* vtabargtoken ::= ANY */ - case 301: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==301); - case 302: /* lp ::= LP */ yytestcase(yyruleno==302); + case 302: /* vtabargtoken ::= ANY */ + case 303: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==303); + case 304: /* lp ::= LP */ yytestcase(yyruleno==304); {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; - case 303: /* with ::= WITH wqlist */ - case 304: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==304); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); } + case 305: /* with ::= WITH wqlist */ + case 306: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==306); +{ sqlite3WithPush(pParse, yymsp[0].minor.yy59, 1); } break; - case 305: /* wqas ::= AS */ -{yymsp[0].minor.yy516 = M10d_Any;} + case 307: /* wqas ::= AS */ +{yymsp[0].minor.yy462 = M10d_Any;} break; - case 306: /* wqas ::= AS MATERIALIZED */ -{yymsp[-1].minor.yy516 = M10d_Yes;} + case 308: /* wqas ::= AS MATERIALIZED */ +{yymsp[-1].minor.yy462 = M10d_Yes;} break; - case 307: /* wqas ::= AS NOT MATERIALIZED */ -{yymsp[-2].minor.yy516 = M10d_No;} + case 309: /* wqas ::= AS NOT MATERIALIZED */ +{yymsp[-2].minor.yy462 = M10d_No;} break; - case 308: /* wqitem ::= nm eidlist_opt wqas LP select RP */ + case 310: /* wqitem ::= withnm eidlist_opt wqas LP select RP */ { - yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/ + yymsp[-5].minor.yy67 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy555, yymsp[-3].minor.yy462); /*A-overwrites-X*/ } break; - case 309: /* wqlist ::= wqitem */ + case 311: /* withnm ::= nm */ +{pParse->bHasWith = 1;} + break; + case 312: /* wqlist ::= wqitem */ { - yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/ + yymsp[0].minor.yy59 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy67); /*A-overwrites-X*/ } break; - case 310: /* wqlist ::= wqlist COMMA wqitem */ + case 313: /* wqlist ::= wqlist COMMA wqitem */ { - yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385); + yymsp[-2].minor.yy59 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy59, yymsp[0].minor.yy67); } break; - case 311: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 314: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - assert( yymsp[0].minor.yy41!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41); - yymsp[0].minor.yy41->pNextWin = yymsp[-2].minor.yy41; - yylhsminor.yy41 = yymsp[0].minor.yy41; + assert( yymsp[0].minor.yy211!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy211); + yymsp[0].minor.yy211->pNextWin = yymsp[-2].minor.yy211; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[-2].minor.yy41 = yylhsminor.yy41; + yymsp[-2].minor.yy211 = yylhsminor.yy211; break; - case 
312: /* windowdefn ::= nm AS LP window RP */ + case 315: /* windowdefn ::= nm AS LP window RP */ { - if( ALWAYS(yymsp[-1].minor.yy41) ){ - yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + if( ALWAYS(yymsp[-1].minor.yy211) ){ + yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy41 = yymsp[-1].minor.yy41; + yylhsminor.yy211 = yymsp[-1].minor.yy211; } - yymsp[-4].minor.yy41 = yylhsminor.yy41; + yymsp[-4].minor.yy211 = yylhsminor.yy211; break; - case 313: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + case 316: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0); + yymsp[-4].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, 0); } break; - case 314: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 317: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy41 = yylhsminor.yy41; + yymsp[-5].minor.yy211 = yylhsminor.yy211; break; - case 315: /* window ::= ORDER BY sortlist frame_opt */ + case 318: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0); + yymsp[-3].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, 0); } break; - case 316: /* window ::= nm ORDER BY sortlist frame_opt */ + case 319: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy41 = yylhsminor.yy41; + yymsp[-4].minor.yy211 = yylhsminor.yy211; break; - case 317: /* window ::= nm frame_opt */ + case 320: /* window ::= nm frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, 0, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy41 = yylhsminor.yy41; + yymsp[-1].minor.yy211 = yylhsminor.yy211; break; - case 318: /* frame_opt ::= */ + case 321: /* frame_opt ::= */ { - yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy211 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 322: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516); + yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy144, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy462); } - yymsp[-2].minor.yy41 = yylhsminor.yy41; + yymsp[-2].minor.yy211 = yylhsminor.yy211; break; - case 320: /* 
frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 323: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516); + yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy144, yymsp[-3].minor.yy509.eType, yymsp[-3].minor.yy509.pExpr, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, yymsp[0].minor.yy462); } - yymsp[-5].minor.yy41 = yylhsminor.yy41; + yymsp[-5].minor.yy211 = yylhsminor.yy211; break; - case 322: /* frame_bound_s ::= frame_bound */ - case 324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324); -{yylhsminor.yy595 = yymsp[0].minor.yy595;} - yymsp[0].minor.yy595 = yylhsminor.yy595; + case 325: /* frame_bound_s ::= frame_bound */ + case 327: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==327); +{yylhsminor.yy509 = yymsp[0].minor.yy509;} + yymsp[0].minor.yy509 = yylhsminor.yy509; break; - case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325); - case 327: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==327); -{yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;} - yymsp[-1].minor.yy595 = yylhsminor.yy595; + case 326: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 328: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==328); + case 330: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==330); +{yylhsminor.yy509.eType = yymsp[-1].major; yylhsminor.yy509.pExpr = 0;} + yymsp[-1].minor.yy509 = yylhsminor.yy509; break; - case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;} - yymsp[-1].minor.yy595 = yylhsminor.yy595; + case 329: /* frame_bound ::= expr PRECEDING|FOLLOWING */ +{yylhsminor.yy509.eType = yymsp[0].major; yylhsminor.yy509.pExpr = yymsp[-1].minor.yy454;} + yymsp[-1].minor.yy509 = yylhsminor.yy509; break; - case 328: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy516 = 0;} + case 331: /* frame_exclude_opt ::= */ +{yymsp[1].minor.yy462 = 0;} break; - case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;} + case 332: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ +{yymsp[-1].minor.yy462 = yymsp[0].minor.yy462;} break; - case 330: /* frame_exclude ::= NO OTHERS */ - case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331); -{yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/} + case 333: /* frame_exclude ::= NO OTHERS */ + case 334: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==334); +{yymsp[-1].minor.yy462 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 332: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy516 = yymsp[0].major; /*A-overwrites-X*/} + case 335: /* frame_exclude ::= GROUP|TIES */ +{yymsp[0].minor.yy462 = yymsp[0].major; /*A-overwrites-X*/} break; - case 333: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; } + case 336: /* window_clause ::= WINDOW windowdefn_list */ +{ yymsp[-1].minor.yy211 = yymsp[0].minor.yy211; } break; - case 334: /* filter_over ::= filter_clause over_clause */ + case 337: /* filter_over ::= filter_clause over_clause */ { - if( yymsp[0].minor.yy41 ){ - 
yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528; + if( yymsp[0].minor.yy211 ){ + yymsp[0].minor.yy211->pFilter = yymsp[-1].minor.yy454; }else{ - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); } - yylhsminor.yy41 = yymsp[0].minor.yy41; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[-1].minor.yy41 = yylhsminor.yy41; + yymsp[-1].minor.yy211 = yylhsminor.yy211; break; - case 335: /* filter_over ::= over_clause */ + case 338: /* filter_over ::= over_clause */ { - yylhsminor.yy41 = yymsp[0].minor.yy41; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[0].minor.yy41 = yylhsminor.yy41; + yymsp[0].minor.yy211 = yylhsminor.yy211; break; - case 336: /* filter_over ::= filter_clause */ + case 339: /* filter_over ::= filter_clause */ { - yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy41 ){ - yylhsminor.yy41->eFrmType = TK_FILTER; - yylhsminor.yy41->pFilter = yymsp[0].minor.yy528; + yylhsminor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy211 ){ + yylhsminor.yy211->eFrmType = TK_FILTER; + yylhsminor.yy211->pFilter = yymsp[0].minor.yy454; }else{ - sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy528); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy454); } } - yymsp[0].minor.yy41 = yylhsminor.yy41; + yymsp[0].minor.yy211 = yylhsminor.yy211; break; - case 337: /* over_clause ::= OVER LP window RP */ + case 340: /* over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41; - assert( yymsp[-3].minor.yy41!=0 ); + yymsp[-3].minor.yy211 = yymsp[-1].minor.yy211; + assert( yymsp[-3].minor.yy211!=0 ); } break; - case 338: /* over_clause ::= OVER nm */ + case 341: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy41 ){ - yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy211 ){ + yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; - case 339: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; } + case 342: /* filter_clause ::= FILTER LP WHERE expr RP */ +{ yymsp[-4].minor.yy454 = yymsp[-1].minor.yy454; } + break; + case 343: /* term ::= QNUMBER */ +{ + yylhsminor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); + sqlite3DequoteNumber(pParse, yylhsminor.yy454); +} + yymsp[0].minor.yy454 = yylhsminor.yy454; break; default: - /* (340) input ::= cmdlist */ yytestcase(yyruleno==340); - /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341); - /* (342) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342); - /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343); - /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344); - /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345); - /* (346) trans_opt ::= */ yytestcase(yyruleno==346); - /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347); - /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348); - /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349); - /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350); - /* (351) cmd ::= create_table create_table_args */ yytestcase(yyruleno==351); - /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ 
assert(yyruleno!=352); - /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353); - /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354); - /* (355) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==355); - /* (356) nm ::= STRING */ yytestcase(yyruleno==356); - /* (357) typetoken ::= typename */ yytestcase(yyruleno==357); - /* (358) typename ::= ID|STRING */ yytestcase(yyruleno==358); - /* (359) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=359); - /* (360) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=360); - /* (361) carglist ::= carglist ccons */ yytestcase(yyruleno==361); - /* (362) carglist ::= */ yytestcase(yyruleno==362); - /* (363) ccons ::= NULL onconf */ yytestcase(yyruleno==363); - /* (364) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==364); - /* (365) ccons ::= AS generated */ yytestcase(yyruleno==365); - /* (366) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==366); - /* (367) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==367); - /* (368) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=368); - /* (369) tconscomma ::= */ yytestcase(yyruleno==369); - /* (370) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=370); - /* (371) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=371); - /* (372) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=372); - /* (373) oneselect ::= values */ yytestcase(yyruleno==373); - /* (374) sclp ::= selcollist COMMA */ yytestcase(yyruleno==374); - /* (375) as ::= ID|STRING */ yytestcase(yyruleno==375); - /* (376) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=376); - /* (377) returning ::= */ yytestcase(yyruleno==377); - /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378); - /* (379) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==379); - /* (380) case_operand ::= expr */ yytestcase(yyruleno==380); - /* (381) exprlist ::= nexprlist */ yytestcase(yyruleno==381); - /* (382) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=382); - /* (383) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=383); - /* (384) nmnum ::= ON */ yytestcase(yyruleno==384); - /* (385) nmnum ::= DELETE */ yytestcase(yyruleno==385); - /* (386) nmnum ::= DEFAULT */ yytestcase(yyruleno==386); - /* (387) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==387); - /* (388) foreach_clause ::= */ yytestcase(yyruleno==388); - /* (389) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==389); - /* (390) trnm ::= nm */ yytestcase(yyruleno==390); - /* (391) tridxby ::= */ yytestcase(yyruleno==391); - /* (392) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==392); - /* (393) database_kw_opt ::= */ yytestcase(yyruleno==393); - /* (394) kwcolumn_opt ::= */ yytestcase(yyruleno==394); - /* (395) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==395); - /* (396) vtabarglist ::= vtabarg */ yytestcase(yyruleno==396); - /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==397); - /* (398) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==398); - /* (399) anylist ::= */ yytestcase(yyruleno==399); - /* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400); - /* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401); - /* (402) with ::= */ yytestcase(yyruleno==402); - /* (403) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=403); - /* (404) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=404); + /* (344) input ::= 
cmdlist */ yytestcase(yyruleno==344); + /* (345) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==345); + /* (346) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=346); + /* (347) ecmd ::= SEMI */ yytestcase(yyruleno==347); + /* (348) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==348); + /* (349) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=349); + /* (350) trans_opt ::= */ yytestcase(yyruleno==350); + /* (351) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==351); + /* (352) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==352); + /* (353) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==353); + /* (354) savepoint_opt ::= */ yytestcase(yyruleno==354); + /* (355) cmd ::= create_table create_table_args */ yytestcase(yyruleno==355); + /* (356) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=356); + /* (357) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==357); + /* (358) columnlist ::= columnname carglist */ yytestcase(yyruleno==358); + /* (359) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==359); + /* (360) nm ::= STRING */ yytestcase(yyruleno==360); + /* (361) typetoken ::= typename */ yytestcase(yyruleno==361); + /* (362) typename ::= ID|STRING */ yytestcase(yyruleno==362); + /* (363) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=363); + /* (364) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=364); + /* (365) carglist ::= carglist ccons */ yytestcase(yyruleno==365); + /* (366) carglist ::= */ yytestcase(yyruleno==366); + /* (367) ccons ::= NULL onconf */ yytestcase(yyruleno==367); + /* (368) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==368); + /* (369) ccons ::= AS generated */ yytestcase(yyruleno==369); + /* (370) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==370); + /* (371) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==371); + /* (372) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=372); + /* (373) tconscomma ::= */ yytestcase(yyruleno==373); + /* (374) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=374); + /* (375) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=375); + /* (376) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=376); + /* (377) oneselect ::= values */ yytestcase(yyruleno==377); + /* (378) sclp ::= selcollist COMMA */ yytestcase(yyruleno==378); + /* (379) as ::= ID|STRING */ yytestcase(yyruleno==379); + /* (380) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=380); + /* (381) returning ::= */ yytestcase(yyruleno==381); + /* (382) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=382); + /* (383) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==383); + /* (384) case_operand ::= expr */ yytestcase(yyruleno==384); + /* (385) exprlist ::= nexprlist */ yytestcase(yyruleno==385); + /* (386) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=386); + /* (387) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=387); + /* (388) nmnum ::= ON */ yytestcase(yyruleno==388); + /* (389) nmnum ::= DELETE */ yytestcase(yyruleno==389); + /* (390) nmnum ::= DEFAULT */ yytestcase(yyruleno==390); + /* (391) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==391); + /* (392) foreach_clause ::= */ yytestcase(yyruleno==392); + /* (393) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==393); + /* (394) trnm ::= nm */ yytestcase(yyruleno==394); + /* (395) tridxby ::= */ yytestcase(yyruleno==395); + /* (396) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==396); 
+ /* (397) database_kw_opt ::= */ yytestcase(yyruleno==397); + /* (398) kwcolumn_opt ::= */ yytestcase(yyruleno==398); + /* (399) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==399); + /* (400) vtabarglist ::= vtabarg */ yytestcase(yyruleno==400); + /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==401); + /* (402) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==402); + /* (403) anylist ::= */ yytestcase(yyruleno==403); + /* (404) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==404); + /* (405) anylist ::= anylist ANY */ yytestcase(yyruleno==405); + /* (406) with ::= */ yytestcase(yyruleno==406); + /* (407) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=407); + /* (408) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=408); break; /********** End reduce actions ************************************************/ }; @@ -176211,19 +177839,12 @@ SQLITE_PRIVATE void sqlite3Parser( (int)(yypParser->yytos - yypParser->yystack)); } #endif -#if YYSTACKDEPTH>0 if( yypParser->yytos>=yypParser->yystackEnd ){ - yyStackOverflow(yypParser); - break; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ if( yyGrowStack(yypParser) ){ yyStackOverflow(yypParser); break; } } -#endif } yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor sqlite3ParserCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ @@ -177294,27 +178915,58 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ *tokenType = TK_INTEGER; #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){ - for(i=3; sqlite3Isxdigit(z[i]); i++){} - return i; - } + for(i=3; 1; i++){ + if( sqlite3Isxdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } + }else #endif - for(i=0; sqlite3Isdigit(z[i]); i++){} + { + for(i=0; 1; i++){ + if( sqlite3Isdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } #ifndef SQLITE_OMIT_FLOATING_POINT - if( z[i]=='.' ){ - i++; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } - if( (z[i]=='e' || z[i]=='E') && - ( sqlite3Isdigit(z[i+1]) - || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) - ) - ){ - i += 2; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } + if( z[i]=='.' 
){ + if( *tokenType==TK_INTEGER ) *tokenType = TK_FLOAT; + for(i++; 1; i++){ + if( sqlite3Isdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } + } + if( (z[i]=='e' || z[i]=='E') && + ( sqlite3Isdigit(z[i+1]) + || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) + ) + ){ + if( *tokenType==TK_INTEGER ) *tokenType = TK_FLOAT; + for(i+=2; 1; i++){ + if( sqlite3Isdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } + } #endif + } while( IdChar(z[i]) ){ *tokenType = TK_ILLEGAL; i++; @@ -177479,10 +179131,13 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( tokenType>=TK_WINDOW ){ assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW + || tokenType==TK_QNUMBER ); #else if( tokenType>=TK_SPACE ){ - assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL ); + assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL + || tokenType==TK_QNUMBER + ); #endif /* SQLITE_OMIT_WINDOWFUNC */ if( AtomicLoad(&db->u1.isInterrupted) ){ pParse->rc = SQLITE_INTERRUPT; @@ -177515,7 +179170,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ assert( n==6 ); tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ - }else{ + }else if( tokenType!=TK_QNUMBER ){ Token x; x.z = zSql; x.n = n; @@ -178866,6 +180521,18 @@ SQLITE_API int sqlite3_config(int op, ...){ } #endif /* SQLITE_OMIT_DESERIALIZE */ + case SQLITE_CONFIG_ROWID_IN_VIEW: { + int *pVal = va_arg(ap,int*); +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + if( 0==*pVal ) sqlite3GlobalConfig.mNoVisibleRowid = TF_NoVisibleRowid; + if( 1==*pVal ) sqlite3GlobalConfig.mNoVisibleRowid = 0; + *pVal = (sqlite3GlobalConfig.mNoVisibleRowid==0); +#else + *pVal = 0; +#endif + break; + } + default: { rc = SQLITE_ERROR; break; @@ -188498,22 +190165,24 @@ static int fts3IntegrityMethod( char **pzErr /* Write error message here */ ){ Fts3Table *p = (Fts3Table*)pVtab; - int rc; + int rc = SQLITE_OK; int bOk = 0; UNUSED_PARAMETER(isQuick); rc = sqlite3Fts3IntegrityCheck(p, &bOk); - assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 ); - if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){ + assert( rc!=SQLITE_CORRUPT_VTAB ); + if( rc==SQLITE_ERROR || (rc&0xFF)==SQLITE_CORRUPT ){ *pzErr = sqlite3_mprintf("unable to validate the inverted index for" " FTS%d table %s.%s: %s", p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc)); - }else if( bOk==0 ){ + if( *pzErr ) rc = SQLITE_OK; + }else if( rc==SQLITE_OK && bOk==0 ){ *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", p->bFts4 ? 4 : 3, zSchema, zTabname); + if( *pzErr==0 ) rc = SQLITE_NOMEM; } sqlite3Fts3SegmentsClose(p); - return SQLITE_OK; + return rc; } @@ -200175,7 +201844,12 @@ SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){ sqlite3_finalize(pStmt); } - *pbOk = (rc==SQLITE_OK && cksum1==cksum2); + if( rc==SQLITE_CORRUPT_VTAB ){ + rc = SQLITE_OK; + *pbOk = 0; + }else{ + *pbOk = (rc==SQLITE_OK && cksum1==cksum2); + } return rc; } @@ -201081,7 +202755,7 @@ static void fts3SnippetDetails( } mCover |= mPhrase; - for(j=0; jnToken; j++){ + for(j=0; jnToken && jnSnippet; j++){ mHighlight |= (mPos>>j); } @@ -203742,7 +205416,6 @@ static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){ } } - /* Append formatted text (not to exceed N bytes) to the JsonString. 
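/*
** Two user-visible behaviors fall out of the hunks above; the examples
** below are a hedged sketch, since the surrounding context is elided.
** The tokenizer now emits TK_QNUMBER when SQLITE_DIGIT_SEPARATOR (assumed
** here to be '_') occurs inside a numeric literal, and the parser strips
** the separators via sqlite3DequoteNumber() before the value is used:
**
**    SELECT 1_000_000;         -- the integer 1000000
**    SELECT 10_000.000_01;     -- the float 10000.00001
**
** SQLITE_CONFIG_ROWID_IN_VIEW reads an int through the supplied pointer
** and writes the effective setting back through the same pointer, as the
** va_arg() handling above shows; it is a no-op unless the library was
** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW:
**
**    int enable = 0;
**    sqlite3_config(SQLITE_CONFIG_ROWID_IN_VIEW, &enable);
**    assert( enable==0 || enable==1 );  /- reports the active setting -/
*/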
*/ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){ @@ -203800,6 +205473,40 @@ static void jsonAppendSeparator(JsonString *p){ jsonAppendChar(p, ','); } +/* c is a control character. Append the canonical JSON representation +** of that control character to p. +** +** This routine assumes that the output buffer has already been enlarged +** sufficiently to hold the worst-case encoding plus a nul terminator. +*/ +static void jsonAppendControlChar(JsonString *p, u8 c){ + static const char aSpecial[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + assert( sizeof(aSpecial)==32 ); + assert( aSpecial['\b']=='b' ); + assert( aSpecial['\f']=='f' ); + assert( aSpecial['\n']=='n' ); + assert( aSpecial['\r']=='r' ); + assert( aSpecial['\t']=='t' ); + assert( c>=0 && cnUsed+7 <= p->nAlloc ); + if( aSpecial[c] ){ + p->zBuf[p->nUsed] = '\\'; + p->zBuf[p->nUsed+1] = aSpecial[c]; + p->nUsed += 2; + }else{ + p->zBuf[p->nUsed] = '\\'; + p->zBuf[p->nUsed+1] = 'u'; + p->zBuf[p->nUsed+2] = '0'; + p->zBuf[p->nUsed+3] = '0'; + p->zBuf[p->nUsed+4] = "0123456789abcdef"[c>>4]; + p->zBuf[p->nUsed+5] = "0123456789abcdef"[c&0xf]; + p->nUsed += 6; + } +} + /* Append the N-byte string in zIn to the end of the JsonString string ** under construction. Enclose the string in double-quotes ("...") and ** escape any double-quotes or backslash characters contained within the @@ -203859,35 +205566,14 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ } c = z[0]; if( c=='"' || c=='\\' ){ - json_simple_escape: if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return; p->zBuf[p->nUsed++] = '\\'; p->zBuf[p->nUsed++] = c; }else if( c=='\'' ){ p->zBuf[p->nUsed++] = c; }else{ - static const char aSpecial[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - assert( sizeof(aSpecial)==32 ); - assert( aSpecial['\b']=='b' ); - assert( aSpecial['\f']=='f' ); - assert( aSpecial['\n']=='n' ); - assert( aSpecial['\r']=='r' ); - assert( aSpecial['\t']=='t' ); - assert( c>=0 && cnUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return; - p->zBuf[p->nUsed++] = '\\'; - p->zBuf[p->nUsed++] = 'u'; - p->zBuf[p->nUsed++] = '0'; - p->zBuf[p->nUsed++] = '0'; - p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4]; - p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf]; + jsonAppendControlChar(p, c); } z++; N--; @@ -204588,7 +206274,10 @@ static u32 jsonbValidityCheck( if( !jsonIsOk[z[j]] && z[j]!='\'' ){ if( z[j]=='"' ){ if( x==JSONB_TEXTJ ) return j+1; - }else if( z[j]!='\\' || j+1>=k ){ + }else if( z[j]<=0x1f ){ + /* Control characters in JSON5 string literals are ok */ + if( x==JSONB_TEXTJ ) return j+1; + }else if( NEVER(z[j]!='\\') || j+1>=k ){ return j+1; }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){ j++; @@ -204786,6 +206475,7 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ case '[': { /* Parse array */ iThis = pParse->nBlob; + assert( i<=(u32)pParse->nJson ); jsonBlobAppendNode(pParse, JSONB_ARRAY, pParse->nJson - i, 0); iStart = pParse->nBlob; if( pParse->oom ) return -1; @@ -204882,9 +206572,14 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ return -1; } }else if( c<=0x1f ){ - /* Control characters are not allowed in strings */ - pParse->iErr = j; - return -1; + if( c==0 ){ + pParse->iErr = j; + return -1; + } + /* Control characters are not allowed in canonical JSON string + ** literals, but are allowed in JSON5 string literals. 
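/*
** The net effect of jsonAppendControlChar(), observable from SQL through
** json_quote(): the five control characters with dedicated short escapes
** use them, and every other byte in the 0x00..0x1f range is rendered as
** a \u00xx sequence. Two hedged examples:
**
**    SELECT json_quote(char(9));    -- '"\t"'
**    SELECT json_quote(char(31));   -- '"\u001f"'
**
** Factoring the escape logic into one routine lets jsonAppendString()
** above and the BLOB-to-text translator in this same hunk share a single
** encoding path.
*/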
*/ + opcode = JSONB_TEXT5; + pParse->hasNonstd = 1; }else if( c=='"' ){ opcode = JSONB_TEXT5; } @@ -205100,6 +206795,7 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ return i+4; } /* fall-through into the default case that checks for NaN */ + /* no break */ deliberate_fall_through } default: { u32 k; @@ -205184,6 +206880,10 @@ static void jsonReturnStringAsBlob(JsonString *pStr){ JsonParse px; memset(&px, 0, sizeof(px)); jsonStringTerminate(pStr); + if( pStr->eErr ){ + sqlite3_result_error_nomem(pStr->pCtx); + return; + } px.zJson = pStr->zBuf; px.nJson = pStr->nUsed; px.db = sqlite3_context_db_handle(pStr->pCtx); @@ -205364,7 +207064,7 @@ static u32 jsonTranslateBlobToText( zIn = (const char*)&pParse->aBlob[i+n]; jsonAppendChar(pOut, '"'); while( sz2>0 ){ - for(k=0; k0 ){ jsonAppendRawNZ(pOut, zIn, k); if( k>=sz2 ){ @@ -205379,6 +207079,13 @@ static u32 jsonTranslateBlobToText( sz2--; continue; } + if( zIn[0]<=0x1f ){ + if( pOut->nUsed+7>pOut->nAlloc && jsonStringGrow(pOut,7) ) break; + jsonAppendControlChar(pOut, zIn[0]); + zIn++; + sz2--; + continue; + } assert( zIn[0]=='\\' ); assert( sz2>=1 ); if( sz2<2 ){ @@ -205481,6 +207188,112 @@ static u32 jsonTranslateBlobToText( return i+n+sz; } +/* Context for recursion of json_pretty() +*/ +typedef struct JsonPretty JsonPretty; +struct JsonPretty { + JsonParse *pParse; /* The BLOB being rendered */ + JsonString *pOut; /* Generate pretty output into this string */ + const char *zIndent; /* Use this text for indentation */ + u32 szIndent; /* Bytes in zIndent[] */ + u32 nIndent; /* Current level of indentation */ +}; + +/* Append indentation to the pretty JSON under construction */ +static void jsonPrettyIndent(JsonPretty *pPretty){ + u32 jj; + for(jj=0; jjnIndent; jj++){ + jsonAppendRaw(pPretty->pOut, pPretty->zIndent, pPretty->szIndent); + } +} + +/* +** Translate the binary JSONB representation of JSON beginning at +** pParse->aBlob[i] into a JSON text string. Append the JSON +** text onto the end of pOut. Return the index in pParse->aBlob[] +** of the first byte past the end of the element that is translated. +** +** This is a variant of jsonTranslateBlobToText() that "pretty-prints" +** the output. Extra whitespace is inserted to make the JSON easier +** for humans to read. +** +** If an error is detected in the BLOB input, the pOut->eErr flag +** might get set to JSTRING_MALFORMED. But not all BLOB input errors +** are detected. So a malformed JSONB input might either result +** in an error, or in incorrect JSON. +** +** The pOut->eErr JSTRING_OOM flag is set on a OOM. 
+*/ +static u32 jsonTranslateBlobToPrettyText( + JsonPretty *pPretty, /* Pretty-printing context */ + u32 i /* Start rendering at this index */ +){ + u32 sz, n, j, iEnd; + const JsonParse *pParse = pPretty->pParse; + JsonString *pOut = pPretty->pOut; + n = jsonbPayloadSize(pParse, i, &sz); + if( n==0 ){ + pOut->eErr |= JSTRING_MALFORMED; + return pParse->nBlob+1; + } + switch( pParse->aBlob[i] & 0x0f ){ + case JSONB_ARRAY: { + j = i+n; + iEnd = j+sz; + jsonAppendChar(pOut, '['); + if( jnIndent++; + while( pOut->eErr==0 ){ + jsonPrettyIndent(pPretty); + j = jsonTranslateBlobToPrettyText(pPretty, j); + if( j>=iEnd ) break; + jsonAppendRawNZ(pOut, ",\n", 2); + } + jsonAppendChar(pOut, '\n'); + pPretty->nIndent--; + jsonPrettyIndent(pPretty); + } + jsonAppendChar(pOut, ']'); + i = iEnd; + break; + } + case JSONB_OBJECT: { + j = i+n; + iEnd = j+sz; + jsonAppendChar(pOut, '{'); + if( jnIndent++; + while( pOut->eErr==0 ){ + jsonPrettyIndent(pPretty); + j = jsonTranslateBlobToText(pParse, j, pOut); + if( j>iEnd ){ + pOut->eErr |= JSTRING_MALFORMED; + break; + } + jsonAppendRawNZ(pOut, ": ", 2); + j = jsonTranslateBlobToPrettyText(pPretty, j); + if( j>=iEnd ) break; + jsonAppendRawNZ(pOut, ",\n", 2); + } + jsonAppendChar(pOut, '\n'); + pPretty->nIndent--; + jsonPrettyIndent(pPretty); + } + jsonAppendChar(pOut, '}'); + i = iEnd; + break; + } + default: { + i = jsonTranslateBlobToText(pParse, i, pOut); + break; + } + } + return i; +} + + /* Return true if the input pJson ** ** For performance reasons, this routine does not do a detailed check of the @@ -206509,8 +208322,9 @@ static JsonParse *jsonParseFuncArg( } p->zJson = (char*)sqlite3_value_text(pArg); p->nJson = sqlite3_value_bytes(pArg); + if( db->mallocFailed ) goto json_pfa_oom; if( p->nJson==0 ) goto json_pfa_malformed; - if( NEVER(p->zJson==0) ) goto json_pfa_oom; + assert( p->zJson!=0 ); if( jsonConvertTextToBlob(p, (flgs & JSON_KEEPERROR) ? 0 : ctx) ){ if( flgs & JSON_KEEPERROR ){ p->nErr = 1; @@ -206676,10 +208490,10 @@ static void jsonDebugPrintBlob( if( sz==0 && x<=JSONB_FALSE ){ sqlite3_str_append(pOut, "\n", 1); }else{ - u32 i; + u32 j; sqlite3_str_appendall(pOut, ": \""); - for(i=iStart+n; iaBlob[i]; + for(j=iStart+n; jaBlob[j]; if( c<0x20 || c>=0x7f ) c = '.'; sqlite3_str_append(pOut, (char*)&c, 1); } @@ -206730,11 +208544,12 @@ static void jsonParseFunc( if( p==0 ) return; if( argc==1 ){ jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out); - sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8); + sqlite3_result_text64(ctx,out.zText,out.nChar,SQLITE_TRANSIENT,SQLITE_UTF8); }else{ jsonShowParse(p); } jsonParseFree(p); + sqlite3_str_reset(&out); } #endif /* SQLITE_DEBUG */ @@ -206833,13 +208648,6 @@ static void jsonArrayLengthFunc( jsonParseFree(p); } -/* True if the string is all digits */ -static int jsonAllDigits(const char *z, int n){ - int i; - for(i=0; i $[NUMBER] // Not PG. Purely for convenience */ jsonStringInit(&jx, ctx); - if( jsonAllDigits(zPath, nPath) ){ + if( sqlite3_value_type(argv[i])==SQLITE_INTEGER ){ jsonAppendRawNZ(&jx, "[", 1); jsonAppendRaw(&jx, zPath, nPath); jsonAppendRawNZ(&jx, "]", 2); @@ -207398,6 +209206,40 @@ static void jsonTypeFunc( jsonParseFree(p); } +/* +** json_pretty(JSON) +** json_pretty(JSON, INDENT) +** +** Return text that is a pretty-printed rendering of the input JSON. +** If the argument is not valid JSON, return NULL. +** +** The INDENT argument is text that is used for indentation. If omitted, +** it defaults to four spaces (the same as PostgreSQL). 
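+**
+** A short usage sketch; the exact layout follows from the pretty-printer
+** implemented below:
+**
+**     SELECT json_pretty('{"a":1,"b":[2,3]}');
+**     -- {
+**     --     "a": 1,
+**     --     "b": [
+**     --         2,
+**     --         3
+**     --     ]
+**     -- }
+**
+**     SELECT json_pretty('[1,2]', char(9));   -- indent with tabs instead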
+*/ +static void jsonPrettyFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + JsonString s; /* The output string */ + JsonPretty x; /* Pretty printing context */ + + memset(&x, 0, sizeof(x)); + x.pParse = jsonParseFuncArg(ctx, argv[0], 0); + if( x.pParse==0 ) return; + x.pOut = &s; + jsonStringInit(&s, ctx); + if( argc==1 || (x.zIndent = (const char*)sqlite3_value_text(argv[1]))==0 ){ + x.zIndent = " "; + x.szIndent = 4; + }else{ + x.szIndent = (u32)strlen(x.zIndent); + } + jsonTranslateBlobToPrettyText(&x, 0); + jsonReturnString(&s, 0, 0); + jsonParseFree(x.pParse); +} + /* ** json_valid(JSON) ** json_valid(JSON, FLAGS) @@ -208087,6 +209929,9 @@ static int jsonEachColumn( case JEACH_VALUE: { u32 i = jsonSkipLabel(p); jsonReturnFromBlob(&p->sParse, i, ctx, 1); + if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY ){ + sqlite3_result_subtype(ctx, JSON_SUBTYPE); + } break; } case JEACH_TYPE: { @@ -208133,9 +209978,9 @@ static int jsonEachColumn( case JEACH_JSON: { if( p->sParse.zJson==0 ){ sqlite3_result_blob(ctx, p->sParse.aBlob, p->sParse.nBlob, - SQLITE_STATIC); + SQLITE_TRANSIENT); }else{ - sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC); + sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_TRANSIENT); } break; } @@ -208409,6 +210254,8 @@ SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc), JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc), JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc), + JFUNCTION(json_pretty, 1,1,0, 0,0,0, jsonPrettyFunc), + JFUNCTION(json_pretty, 2,1,0, 0,0,0, jsonPrettyFunc), JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc), JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc), JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc), @@ -209161,11 +211008,9 @@ static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){ ** Clear the Rtree.pNodeBlob object */ static void nodeBlobReset(Rtree *pRtree){ - if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){ - sqlite3_blob *pBlob = pRtree->pNodeBlob; - pRtree->pNodeBlob = 0; - sqlite3_blob_close(pBlob); - } + sqlite3_blob *pBlob = pRtree->pNodeBlob; + pRtree->pNodeBlob = 0; + sqlite3_blob_close(pBlob); } /* @@ -209209,7 +211054,6 @@ static int nodeAcquire( &pRtree->pNodeBlob); } if( rc ){ - nodeBlobReset(pRtree); *ppNode = 0; /* If unable to open an sqlite3_blob on the desired row, that can only ** be because the shadow tables hold erroneous data. 
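/*
** Side note on the JEACH_VALUE change above: because the value column of
** json_each()/json_tree() now carries the JSON subtype whenever the
** element is itself an array or object, nested structure flows back into
** other JSON functions as JSON rather than as quoted text. A hedged
** example:
**
**    SELECT json_insert('[]', '$[#]', value) FROM json_each('[1,[2,3]]');
**    -- '[1]' and '[[2,3]]'  (without the subtype, the second row would
**    -- come back as '["[2,3]"]')
*/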
*/ @@ -209269,6 +211113,7 @@ static int nodeAcquire( } *ppNode = pNode; }else{ + nodeBlobReset(pRtree); if( pNode ){ pRtree->nNodeRef--; sqlite3_free(pNode); @@ -209413,6 +211258,7 @@ static void nodeGetCoord( int iCoord, /* Which coordinate to extract */ RtreeCoord *pCoord /* OUT: Space to write result to */ ){ + assert( iCellzData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord); } @@ -209602,7 +211448,9 @@ static int rtreeClose(sqlite3_vtab_cursor *cur){ sqlite3_finalize(pCsr->pReadAux); sqlite3_free(pCsr); pRtree->nCursor--; - nodeBlobReset(pRtree); + if( pRtree->nCursor==0 && pRtree->inWrTrans==0 ){ + nodeBlobReset(pRtree); + } return SQLITE_OK; } @@ -210187,7 +212035,11 @@ static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){ int rc = SQLITE_OK; RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc); if( rc==SQLITE_OK && ALWAYS(p) ){ - *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell); + if( p->iCell>=NCELL(pNode) ){ + rc = SQLITE_ABORT; + }else{ + *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell); + } } return rc; } @@ -210205,6 +212057,7 @@ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ if( rc ) return rc; if( NEVER(p==0) ) return SQLITE_OK; + if( p->iCell>=NCELL(pNode) ) return SQLITE_ABORT; if( i==0 ){ sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell)); }else if( i<=pRtree->nDim2 ){ @@ -210302,6 +212155,8 @@ static int deserializeGeometry(sqlite3_value *pValue, RtreeConstraint *pCons){ return SQLITE_OK; } +SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double); + /* ** Rtree virtual table module xFilter method. */ @@ -210331,7 +212186,8 @@ static int rtreeFilter( i64 iNode = 0; int eType = sqlite3_value_numeric_type(argv[0]); if( eType==SQLITE_INTEGER - || (eType==SQLITE_FLOAT && sqlite3_value_double(argv[0])==iRowid) + || (eType==SQLITE_FLOAT + && 0==sqlite3IntFloatCompare(iRowid,sqlite3_value_double(argv[0]))) ){ rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode); }else{ @@ -211687,7 +213543,7 @@ static int rtreeUpdate( static int rtreeBeginTransaction(sqlite3_vtab *pVtab){ Rtree *pRtree = (Rtree *)pVtab; assert( pRtree->inWrTrans==0 ); - pRtree->inWrTrans++; + pRtree->inWrTrans = 1; return SQLITE_OK; } @@ -211701,6 +213557,9 @@ static int rtreeEndTransaction(sqlite3_vtab *pVtab){ nodeBlobReset(pRtree); return SQLITE_OK; } +static int rtreeRollback(sqlite3_vtab *pVtab){ + return rtreeEndTransaction(pVtab); +} /* ** The xRename method for rtree module virtual tables. 
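/*
** How the optional third argument added to icuLoadCollation() above is
** reached from SQL -- assuming the ICU extension's usual entry point,
** icu_load_collation(), whose registration is not shown in this diff:
**
**    SELECT icu_load_collation('en_US', 'caseless', 'PRIMARY');
**    SELECT 'ABC' = 'abc' COLLATE caseless;   -- 1 at PRIMARY strength
**
** A strength name outside the aStrength[] table produces the "unknown
** collation strength" error assembled above, which enumerates the
** accepted values (PRIMARY, SECONDARY, TERTIARY, DEFAULT, QUARTERNARY,
** IDENTICAL).
*/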
@@ -211819,7 +213678,7 @@ static sqlite3_module rtreeModule = { rtreeBeginTransaction, /* xBegin - begin transaction */ rtreeEndTransaction, /* xSync - sync transaction */ rtreeEndTransaction, /* xCommit - commit transaction */ - rtreeEndTransaction, /* xRollback - rollback transaction */ + rtreeRollback, /* xRollback - rollback transaction */ 0, /* xFindFunction - function overloading */ rtreeRename, /* xRename - rename the table */ rtreeSavepoint, /* xSavepoint */ @@ -215238,7 +217097,7 @@ static void icuLoadCollation( UCollator *pUCollator; /* ICU library collation object */ int rc; /* Return code from sqlite3_create_collation_x() */ - assert(nArg==2); + assert(nArg==2 || nArg==3); (void)nArg; /* Unused parameter */ zLocale = (const char *)sqlite3_value_text(apArg[0]); zName = (const char *)sqlite3_value_text(apArg[1]); @@ -215253,7 +217112,39 @@ static void icuLoadCollation( return; } assert(p); - + if(nArg==3){ + const char *zOption = (const char*)sqlite3_value_text(apArg[2]); + static const struct { + const char *zName; + UColAttributeValue val; + } aStrength[] = { + { "PRIMARY", UCOL_PRIMARY }, + { "SECONDARY", UCOL_SECONDARY }, + { "TERTIARY", UCOL_TERTIARY }, + { "DEFAULT", UCOL_DEFAULT_STRENGTH }, + { "QUARTERNARY", UCOL_QUATERNARY }, + { "IDENTICAL", UCOL_IDENTICAL }, + }; + unsigned int i; + for(i=0; i=sizeof(aStrength)/sizeof(aStrength[0]) ){ + sqlite3_str *pStr = sqlite3_str_new(sqlite3_context_db_handle(p)); + sqlite3_str_appendf(pStr, + "unknown collation strength \"%s\" - should be one of:", + zOption); + for(i=0; ipTblIter, &p->zErrmsg); pIter->zTbl = 0; + pIter->zDataTbl = 0; }else{ pIter->zTbl = (const char*)sqlite3_column_text(pIter->pTblIter, 0); pIter->zDataTbl = (const char*)sqlite3_column_text(pIter->pTblIter,1); @@ -219206,7 +221100,7 @@ static i64 rbuShmChecksum(sqlite3rbu *p){ u32 volatile *ptr; p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, (void volatile**)&ptr); if( p->rc==SQLITE_OK ){ - iRet = ((i64)ptr[10] << 32) + ptr[11]; + iRet = (i64)(((u64)ptr[10] << 32) + ptr[11]); } } return iRet; @@ -226677,14 +228571,14 @@ static int sessionChangesetNextOne( p->rc = sessionInputBuffer(&p->in, 2); if( p->rc!=SQLITE_OK ) return p->rc; + sessionDiscardData(&p->in); + p->in.iCurrent = p->in.iNext; + /* If the iterator is already at the end of the changeset, return DONE. */ if( p->in.iNext>=p->in.nData ){ return SQLITE_DONE; } - sessionDiscardData(&p->in); - p->in.iCurrent = p->in.iNext; - op = p->in.aData[p->in.iNext++]; while( op=='T' || op=='P' ){ if( pbNew ) *pbNew = 1; @@ -228419,6 +230313,7 @@ struct sqlite3_changegroup { int rc; /* Error code */ int bPatch; /* True to accumulate patchsets */ SessionTable *pList; /* List of tables in current patch */ + SessionBuffer rec; sqlite3 *db; /* Configured by changegroup_schema() */ char *zDb; /* Configured by changegroup_schema() */ @@ -228717,108 +230612,128 @@ static int sessionChangesetExtendRecord( } /* -** Add all changes in the changeset traversed by the iterator passed as -** the first argument to the changegroup hash tables. +** Locate or create a SessionTable object that may be used to add the +** change currently pointed to by iterator pIter to changegroup pGrp. +** If successful, set output variable (*ppTab) to point to the table +** object and return SQLITE_OK. Otherwise, if some error occurs, return +** an SQLite error code and leave (*ppTab) set to NULL. 
*/ -static int sessionChangesetToHash( - sqlite3_changeset_iter *pIter, /* Iterator to read from */ - sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */ - int bRebase /* True if hash table is for rebasing */ +static int sessionChangesetFindTable( + sqlite3_changegroup *pGrp, + const char *zTab, + sqlite3_changeset_iter *pIter, + SessionTable **ppTab ){ - u8 *aRec; - int nRec; int rc = SQLITE_OK; SessionTable *pTab = 0; - SessionBuffer rec = {0, 0, 0}; - - while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){ - const char *zNew; - int nCol; - int op; - int iHash; - int bIndirect; - SessionChange *pChange; - SessionChange *pExist = 0; - SessionChange **pp; - - /* Ensure that only changesets, or only patchsets, but not a mixture - ** of both, are being combined. It is an error to try to combine a - ** changeset and a patchset. */ - if( pGrp->pList==0 ){ - pGrp->bPatch = pIter->bPatchset; - }else if( pIter->bPatchset!=pGrp->bPatch ){ - rc = SQLITE_ERROR; - break; - } + int nTab = (int)strlen(zTab); + u8 *abPK = 0; + int nCol = 0; - sqlite3changeset_op(pIter, &zNew, &nCol, &op, &bIndirect); - if( !pTab || sqlite3_stricmp(zNew, pTab->zName) ){ - /* Search the list for a matching table */ - int nNew = (int)strlen(zNew); - u8 *abPK; + *ppTab = 0; + sqlite3changeset_pk(pIter, &abPK, &nCol); - sqlite3changeset_pk(pIter, &abPK, 0); - for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){ - if( 0==sqlite3_strnicmp(pTab->zName, zNew, nNew+1) ) break; - } - if( !pTab ){ - SessionTable **ppTab; + /* Search the list for an existing table */ + for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){ + if( 0==sqlite3_strnicmp(pTab->zName, zTab, nTab+1) ) break; + } - pTab = sqlite3_malloc64(sizeof(SessionTable) + nCol + nNew+1); - if( !pTab ){ - rc = SQLITE_NOMEM; - break; - } - memset(pTab, 0, sizeof(SessionTable)); - pTab->nCol = nCol; - pTab->abPK = (u8*)&pTab[1]; - memcpy(pTab->abPK, abPK, nCol); - pTab->zName = (char*)&pTab->abPK[nCol]; - memcpy(pTab->zName, zNew, nNew+1); - - if( pGrp->db ){ - pTab->nCol = 0; - rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); - if( rc ){ - assert( pTab->azCol==0 ); - sqlite3_free(pTab); - break; - } - } + /* If one was not found above, create a new table now */ + if( !pTab ){ + SessionTable **ppNew; - /* The new object must be linked on to the end of the list, not - ** simply added to the start of it. This is to ensure that the - ** tables within the output of sqlite3changegroup_output() are in - ** the right order. */ - for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext); - *ppTab = pTab; - } + pTab = sqlite3_malloc64(sizeof(SessionTable) + nCol + nTab+1); + if( !pTab ){ + return SQLITE_NOMEM; + } + memset(pTab, 0, sizeof(SessionTable)); + pTab->nCol = nCol; + pTab->abPK = (u8*)&pTab[1]; + memcpy(pTab->abPK, abPK, nCol); + pTab->zName = (char*)&pTab->abPK[nCol]; + memcpy(pTab->zName, zTab, nTab+1); - if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ - rc = SQLITE_SCHEMA; - break; + if( pGrp->db ){ + pTab->nCol = 0; + rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); + if( rc ){ + assert( pTab->azCol==0 ); + sqlite3_free(pTab); + return rc; } } - if( nColnCol ){ - assert( pGrp->db ); - rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, &rec); - if( rc ) break; - aRec = rec.aBuf; - nRec = rec.nBuf; - } + /* The new object must be linked on to the end of the list, not + ** simply added to the start of it. 
This is to ensure that the + ** tables within the output of sqlite3changegroup_output() are in + ** the right order. */ + for(ppNew=&pGrp->pList; *ppNew; ppNew=&(*ppNew)->pNext); + *ppNew = pTab; + } - if( sessionGrowHash(0, pIter->bPatchset, pTab) ){ - rc = SQLITE_NOMEM; - break; - } + /* Check that the table is compatible. */ + if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ + rc = SQLITE_SCHEMA; + } + + *ppTab = pTab; + return rc; +} + +/* +** Add the change currently indicated by iterator pIter to the hash table +** belonging to changegroup pGrp. +*/ +static int sessionOneChangeToHash( + sqlite3_changegroup *pGrp, + sqlite3_changeset_iter *pIter, + int bRebase +){ + int rc = SQLITE_OK; + int nCol = 0; + int op = 0; + int iHash = 0; + int bIndirect = 0; + SessionChange *pChange = 0; + SessionChange *pExist = 0; + SessionChange **pp = 0; + SessionTable *pTab = 0; + u8 *aRec = &pIter->in.aData[pIter->in.iCurrent + 2]; + int nRec = (pIter->in.iNext - pIter->in.iCurrent) - 2; + + /* Ensure that only changesets, or only patchsets, but not a mixture + ** of both, are being combined. It is an error to try to combine a + ** changeset and a patchset. */ + if( pGrp->pList==0 ){ + pGrp->bPatch = pIter->bPatchset; + }else if( pIter->bPatchset!=pGrp->bPatch ){ + rc = SQLITE_ERROR; + } + + if( rc==SQLITE_OK ){ + const char *zTab = 0; + sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect); + rc = sessionChangesetFindTable(pGrp, zTab, pIter, &pTab); + } + + if( rc==SQLITE_OK && nColnCol ){ + SessionBuffer *pBuf = &pGrp->rec; + rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, pBuf); + aRec = pBuf->aBuf; + nRec = pBuf->nBuf; + assert( pGrp->db ); + } + + if( rc==SQLITE_OK && sessionGrowHash(0, pIter->bPatchset, pTab) ){ + rc = SQLITE_NOMEM; + } + + if( rc==SQLITE_OK ){ + /* Search for existing entry. If found, remove it from the hash table. + ** Code below may link it back in. */ iHash = sessionChangeHash( pTab, (pIter->bPatchset && op==SQLITE_DELETE), aRec, pTab->nChange ); - - /* Search for existing entry. If found, remove it from the hash table. - ** Code below may link it back in. - */ for(pp=&pTab->apChange[iHash]; *pp; pp=&(*pp)->pNext){ int bPkOnly1 = 0; int bPkOnly2 = 0; @@ -228833,19 +230748,41 @@ static int sessionChangesetToHash( break; } } + } + if( rc==SQLITE_OK ){ rc = sessionChangeMerge(pTab, bRebase, pIter->bPatchset, pExist, op, bIndirect, aRec, nRec, &pChange ); - if( rc ) break; - if( pChange ){ - pChange->pNext = pTab->apChange[iHash]; - pTab->apChange[iHash] = pChange; - pTab->nEntry++; - } + } + if( rc==SQLITE_OK && pChange ){ + pChange->pNext = pTab->apChange[iHash]; + pTab->apChange[iHash] = pChange; + pTab->nEntry++; + } + + if( rc==SQLITE_OK ) rc = pIter->rc; + return rc; +} + +/* +** Add all changes in the changeset traversed by the iterator passed as +** the first argument to the changegroup hash tables. 
+*/ +static int sessionChangesetToHash( + sqlite3_changeset_iter *pIter, /* Iterator to read from */ + sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */ + int bRebase /* True if hash table is for rebasing */ +){ + u8 *aRec; + int nRec; + int rc = SQLITE_OK; + + while( SQLITE_ROW==(sessionChangesetNext(pIter, &aRec, &nRec, 0)) ){ + rc = sessionOneChangeToHash(pGrp, pIter, bRebase); + if( rc!=SQLITE_OK ) break; } - sqlite3_free(rec.aBuf); if( rc==SQLITE_OK ) rc = pIter->rc; return rc; } @@ -228973,6 +230910,23 @@ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void return rc; } +/* +** Add a single change to a changeset-group. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup *pGrp, + sqlite3_changeset_iter *pIter +){ + if( pIter->in.iCurrent==pIter->in.iNext + || pIter->rc!=SQLITE_OK + || pIter->bInvert + ){ + /* Iterator does not point to any valid entry or is an INVERT iterator. */ + return SQLITE_ERROR; + } + return sessionOneChangeToHash(pGrp, pIter, 0); +} + /* ** Obtain a buffer containing a changeset representing the concatenation ** of all changesets added to the group so far. @@ -229022,6 +230976,7 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ if( pGrp ){ sqlite3_free(pGrp->zDb); sessionDeleteTable(0, pGrp->pList); + sqlite3_free(pGrp->rec.aBuf); sqlite3_free(pGrp); } } @@ -229423,6 +231378,7 @@ SQLITE_API int sqlite3rebaser_rebase_strm( SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){ if( p ){ sessionDeleteTable(0, p->grp.pList); + sqlite3_free(p->grp.rec.aBuf); sqlite3_free(p); } } @@ -229520,8 +231476,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken @@ -231117,6 +233073,9 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** sqlite3Fts5ParserARG_STORE Code to store %extra_argument into fts5yypParser ** sqlite3Fts5ParserARG_FETCH Code to extract %extra_argument from fts5yypParser ** sqlite3Fts5ParserCTX_* As sqlite3Fts5ParserARG_ except for %extra_context +** fts5YYREALLOC Name of the realloc() function to use +** fts5YYFREE Name of the free() function to use +** fts5YYDYNSTACK True if stack space should be extended on heap ** fts5YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** fts5YYNSTATE the combined number of states. 
@@ -231130,6 +233089,8 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** fts5YY_NO_ACTION The fts5yy_action[] code for no-op ** fts5YY_MIN_REDUCE Minimum value for reduce actions ** fts5YY_MAX_REDUCE Maximum value for reduce actions +** fts5YY_MIN_DSTRCTR Minimum symbol value that has a destructor +** fts5YY_MAX_DSTRCTR Maximum symbol value that has a destructor */ #ifndef INTERFACE # define INTERFACE 1 @@ -231156,6 +233117,9 @@ typedef union { #define sqlite3Fts5ParserARG_PARAM ,pParse #define sqlite3Fts5ParserARG_FETCH Fts5Parse *pParse=fts5yypParser->pParse; #define sqlite3Fts5ParserARG_STORE fts5yypParser->pParse=pParse; +#define fts5YYREALLOC realloc +#define fts5YYFREE free +#define fts5YYDYNSTACK 0 #define sqlite3Fts5ParserCTX_SDECL #define sqlite3Fts5ParserCTX_PDECL #define sqlite3Fts5ParserCTX_PARAM @@ -231173,6 +233137,8 @@ typedef union { #define fts5YY_NO_ACTION 82 #define fts5YY_MIN_REDUCE 83 #define fts5YY_MAX_REDUCE 110 +#define fts5YY_MIN_DSTRCTR 16 +#define fts5YY_MAX_DSTRCTR 24 /************* End control #defines *******************************************/ #define fts5YY_NLOOKAHEAD ((int)(sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0]))) @@ -231188,6 +233154,22 @@ typedef union { # define fts5yytestcase(X) #endif +/* Macro to determine if stack space has the ability to grow using +** heap memory. +*/ +#if fts5YYSTACKDEPTH<=0 || fts5YYDYNSTACK +# define fts5YYGROWABLESTACK 1 +#else +# define fts5YYGROWABLESTACK 0 +#endif + +/* Guarantee a minimum number of initial stack slots. +*/ +#if fts5YYSTACKDEPTH<=0 +# undef fts5YYSTACKDEPTH +# define fts5YYSTACKDEPTH 2 /* Need a minimum stack size */ +#endif + /* Next are the tables used to determine what action to take based on the ** current state and lookahead token. These tables are used to implement @@ -231348,14 +233330,9 @@ struct fts5yyParser { #endif sqlite3Fts5ParserARG_SDECL /* A place to hold %extra_argument */ sqlite3Fts5ParserCTX_SDECL /* A place to hold %extra_context */ -#if fts5YYSTACKDEPTH<=0 - int fts5yystksz; /* Current side of the stack */ - fts5yyStackEntry *fts5yystack; /* The parser's stack */ - fts5yyStackEntry fts5yystk0; /* First stack entry */ -#else - fts5yyStackEntry fts5yystack[fts5YYSTACKDEPTH]; /* The parser's stack */ - fts5yyStackEntry *fts5yystackEnd; /* Last entry in the stack */ -#endif + fts5yyStackEntry *fts5yystackEnd; /* Last entry in the stack */ + fts5yyStackEntry *fts5yystack; /* The parser stack */ + fts5yyStackEntry fts5yystk0[fts5YYSTACKDEPTH]; /* Initial stack space */ }; typedef struct fts5yyParser fts5yyParser; @@ -231462,37 +233439,45 @@ static const char *const fts5yyRuleName[] = { #endif /* NDEBUG */ -#if fts5YYSTACKDEPTH<=0 +#if fts5YYGROWABLESTACK /* ** Try to increase the size of the parser stack. Return the number ** of errors. Return 0 on success. */ static int fts5yyGrowStack(fts5yyParser *p){ + int oldSize = 1 + (int)(p->fts5yystackEnd - p->fts5yystack); int newSize; int idx; fts5yyStackEntry *pNew; - newSize = p->fts5yystksz*2 + 100; - idx = p->fts5yytos ? 
(int)(p->fts5yytos - p->fts5yystack) : 0; - if( p->fts5yystack==&p->fts5yystk0 ){ - pNew = malloc(newSize*sizeof(pNew[0])); - if( pNew ) pNew[0] = p->fts5yystk0; + newSize = oldSize*2 + 100; + idx = (int)(p->fts5yytos - p->fts5yystack); + if( p->fts5yystack==p->fts5yystk0 ){ + pNew = fts5YYREALLOC(0, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + memcpy(pNew, p->fts5yystack, oldSize*sizeof(pNew[0])); }else{ - pNew = realloc(p->fts5yystack, newSize*sizeof(pNew[0])); + pNew = fts5YYREALLOC(p->fts5yystack, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; } - if( pNew ){ - p->fts5yystack = pNew; - p->fts5yytos = &p->fts5yystack[idx]; + p->fts5yystack = pNew; + p->fts5yytos = &p->fts5yystack[idx]; #ifndef NDEBUG - if( fts5yyTraceFILE ){ - fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n", - fts5yyTracePrompt, p->fts5yystksz, newSize); - } -#endif - p->fts5yystksz = newSize; + if( fts5yyTraceFILE ){ + fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n", + fts5yyTracePrompt, oldSize, newSize); } - return pNew==0; +#endif + p->fts5yystackEnd = &p->fts5yystack[newSize-1]; + return 0; } +#endif /* fts5YYGROWABLESTACK */ + +#if !fts5YYGROWABLESTACK +/* For builds that do no have a growable stack, fts5yyGrowStack always +** returns an error. +*/ +# define fts5yyGrowStack(X) 1 #endif /* Datatype of the argument to the memory allocated passed as the @@ -231512,24 +233497,14 @@ static void sqlite3Fts5ParserInit(void *fts5yypRawParser sqlite3Fts5ParserCTX_PD #ifdef fts5YYTRACKMAXSTACKDEPTH fts5yypParser->fts5yyhwm = 0; #endif -#if fts5YYSTACKDEPTH<=0 - fts5yypParser->fts5yytos = NULL; - fts5yypParser->fts5yystack = NULL; - fts5yypParser->fts5yystksz = 0; - if( fts5yyGrowStack(fts5yypParser) ){ - fts5yypParser->fts5yystack = &fts5yypParser->fts5yystk0; - fts5yypParser->fts5yystksz = 1; - } -#endif + fts5yypParser->fts5yystack = fts5yypParser->fts5yystk0; + fts5yypParser->fts5yystackEnd = &fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1]; #ifndef fts5YYNOERRORRECOVERY fts5yypParser->fts5yyerrcnt = -1; #endif fts5yypParser->fts5yytos = fts5yypParser->fts5yystack; fts5yypParser->fts5yystack[0].stateno = 0; fts5yypParser->fts5yystack[0].major = 0; -#if fts5YYSTACKDEPTH>0 - fts5yypParser->fts5yystackEnd = &fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1]; -#endif } #ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK @@ -231643,9 +233618,26 @@ static void fts5yy_pop_parser_stack(fts5yyParser *pParser){ */ static void sqlite3Fts5ParserFinalize(void *p){ fts5yyParser *pParser = (fts5yyParser*)p; - while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser); -#if fts5YYSTACKDEPTH<=0 - if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack); + + /* In-lined version of calling fts5yy_pop_parser_stack() for each + ** element left in the stack */ + fts5yyStackEntry *fts5yytos = pParser->fts5yytos; + while( fts5yytos>pParser->fts5yystack ){ +#ifndef NDEBUG + if( fts5yyTraceFILE ){ + fprintf(fts5yyTraceFILE,"%sPopping %s\n", + fts5yyTracePrompt, + fts5yyTokenName[fts5yytos->major]); + } +#endif + if( fts5yytos->major>=fts5YY_MIN_DSTRCTR ){ + fts5yy_destructor(pParser, fts5yytos->major, &fts5yytos->minor); + } + fts5yytos--; + } + +#if fts5YYGROWABLESTACK + if( pParser->fts5yystack!=pParser->fts5yystk0 ) fts5YYFREE(pParser->fts5yystack); #endif } @@ -231872,25 +233864,19 @@ static void fts5yy_shift( assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack) ); } #endif -#if fts5YYSTACKDEPTH>0 - if( 
fts5yypParser->fts5yytos>fts5yypParser->fts5yystackEnd ){
-    fts5yypParser->fts5yytos--;
-    fts5yyStackOverflow(fts5yypParser);
-    return;
-  }
-#else
-  if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz] ){
+  fts5yytos = fts5yypParser->fts5yytos;
+  if( fts5yytos>fts5yypParser->fts5yystackEnd ){
     if( fts5yyGrowStack(fts5yypParser) ){
       fts5yypParser->fts5yytos--;
       fts5yyStackOverflow(fts5yypParser);
       return;
     }
+    fts5yytos = fts5yypParser->fts5yytos;
+    assert( fts5yytos <= fts5yypParser->fts5yystackEnd );
   }
-#endif
   if( fts5yyNewState > fts5YY_MAX_SHIFT ){
     fts5yyNewState += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE;
   }
-  fts5yytos = fts5yypParser->fts5yytos;
   fts5yytos->stateno = fts5yyNewState;
   fts5yytos->major = fts5yyMajor;
   fts5yytos->minor.fts5yy0 = fts5yyMinor;
@@ -232327,19 +234313,12 @@ static void sqlite3Fts5Parser(
           (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack));
       }
 #endif
-#if fts5YYSTACKDEPTH>0
       if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){
-        fts5yyStackOverflow(fts5yypParser);
-        break;
-      }
-#else
-      if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){
         if( fts5yyGrowStack(fts5yypParser) ){
           fts5yyStackOverflow(fts5yypParser);
           break;
         }
       }
-#endif
     }
     fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyruleno,fts5yymajor,fts5yyminor sqlite3Fts5ParserCTX_PARAM);
   }else if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){
@@ -235016,7 +236995,11 @@ static int sqlite3Fts5ExprNew(
   }
 
   sqlite3_free(sParse.apPhrase);
-  *pzErr = sParse.zErr;
+  if( 0==*pzErr ){
+    *pzErr = sParse.zErr;
+  }else{
+    sqlite3_free(sParse.zErr);
+  }
   return sParse.rc;
 }
 
@@ -237144,6 +239127,7 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd(
   assert( pRight->eType==FTS5_STRING
       || pRight->eType==FTS5_TERM
       || pRight->eType==FTS5_EOF
+      || (pRight->eType==FTS5_AND && pParse->bPhraseToAnd)
   );
 
   if( pLeft->eType==FTS5_AND ){
@@ -245378,23 +247362,26 @@ static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){
 static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){
   int ii;
   Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+  Fts5Index *pIndex = pIter->pIndex;
 
   for(ii=0; ii<pT->nIter; ii++){
     Fts5Iter *p = pT->apIter[ii];
     if( p->base.bEof==0
     && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowid<iFrom))
     ){
-      fts5MultiIterNext(p->pIndex, p, bFrom, iFrom);
+      fts5MultiIterNext(pIndex, p, bFrom, iFrom);
       while( bFrom && p->base.bEof==0
          && p->base.iRowid<iFrom
-         && p->pIndex->rc==SQLITE_OK
+         && pIndex->rc==SQLITE_OK
       ){
-        fts5MultiIterNext(p->pIndex, p, 0, 0);
+        fts5MultiIterNext(pIndex, p, 0, 0);
       }
     }
  }
 
-  fts5IterSetOutputsTokendata(pIter);
+  if( pIndex->rc==SQLITE_OK ){
+    fts5IterSetOutputsTokendata(pIter);
+  }
 }
 
 /*
@@ -249308,6 +251295,7 @@ static int fts5UpdateMethod(
         rc = SQLITE_ERROR;
       }else{
         rc = fts5SpecialDelete(pTab, apVal);
+        bUpdateOrDelete = 1;
       }
     }else{
       rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]);
@@ -250482,14 +252470,16 @@ static int sqlite3Fts5GetTokenizer(
   if( pMod==0 ){
     assert( nArg>0 );
     rc = SQLITE_ERROR;
-    *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]);
+    if( pzErr ) *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]);
   }else{
     rc = pMod->x.xCreate(
         pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok
     );
     pConfig->pTokApi = &pMod->x;
     if( rc!=SQLITE_OK ){
-      if( pzErr ) *pzErr = sqlite3_mprintf("error in tokenizer constructor");
+      if( pzErr && rc!=SQLITE_NOMEM ){
+        *pzErr = sqlite3_mprintf("error in tokenizer constructor");
+      }
     }else{
       pConfig->ePattern = sqlite3Fts5TokenizerPattern(
           pMod->x.xCreate, pConfig->pTok
@@ -250548,7
+252538,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33", -1, SQLITE_TRANSIENT); } /* @@ -250583,18 +252573,25 @@ static int fts5IntegrityMethod( assert( pzErr!=0 && *pzErr==0 ); UNUSED_PARAM(isQuick); + assert( pTab->p.pConfig->pzErrmsg==0 ); + pTab->p.pConfig->pzErrmsg = pzErr; rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0); - if( (rc&0xff)==SQLITE_CORRUPT ){ - *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", - zSchema, zTabname); - }else if( rc!=SQLITE_OK ){ - *pzErr = sqlite3_mprintf("unable to validate the inverted index for" - " FTS5 table %s.%s: %s", - zSchema, zTabname, sqlite3_errstr(rc)); + if( *pzErr==0 && rc!=SQLITE_OK ){ + if( (rc&0xff)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", + zSchema, zTabname); + rc = (*pzErr) ? SQLITE_OK : SQLITE_NOMEM; + }else{ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS5 table %s.%s: %s", + zSchema, zTabname, sqlite3_errstr(rc)); + } } + sqlite3Fts5IndexCloseReader(pTab->p.pIndex); + pTab->p.pConfig->pzErrmsg = 0; - return SQLITE_OK; + return rc; } static int fts5Init(sqlite3 *db){ @@ -252026,7 +254023,7 @@ static int fts5AsciiCreate( int i; memset(p, 0, sizeof(AsciiTokenizer)); memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar)); - for(i=0; rc==SQLITE_OK && ibFold = 1; pNew->iFoldParam = 0; - for(i=0; rc==SQLITE_OK && iiFoldParam!=0 && pNew->bFold==0 ){ rc = SQLITE_ERROR; diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index a07a51952..d67a4adb6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.45.1" -#define SQLITE_VERSION_NUMBER 3045001 -#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a" +#define SQLITE_VERSION "3.46.1" +#define SQLITE_VERSION_NUMBER 3046001 +#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -421,6 +421,8 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. **

  • The application must not modify the SQL statement text passed into ** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. +**
  • The application must not dereference the arrays or string pointers +** passed as the 3rd and 4th callback parameters after it returns. ** */ SQLITE_API int sqlite3_exec( @@ -763,11 +765,11 @@ struct sqlite3_file { ** ** xLock() upgrades the database file lock. In other words, xLock() moves the ** database file lock in the direction NONE toward EXCLUSIVE. The argument to -** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never ** SQLITE_LOCK_NONE. If the database file lock is already at or above the ** requested lock, then the call to xLock() is a no-op. ** xUnlock() downgrades the database file lock to either SHARED or NONE. -* If the lock is already at or below the requested lock state, then the call +** If the lock is already at or below the requested lock state, then the call ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, @@ -2142,6 +2144,22 @@ struct sqlite3_mem_methods { ** configuration setting is never used, then the default maximum is determined ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that ** compile-time option is not set, then the default maximum is 1073741824. +** +** [[SQLITE_CONFIG_ROWID_IN_VIEW]] +**
    SQLITE_CONFIG_ROWID_IN_VIEW +**
    The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability +** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is +** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability +** defaults to on. This configuration option queries the current setting or +** changes the setting to off or on. The argument is a pointer to an integer. +** If that integer initially holds a value of 1, then the ability for VIEWs to +** have ROWIDs is activated. If the integer initially holds zero, then the +** ability is deactivated. Any other initial value for the integer leaves the +** setting unchanged. After changes, if any, the integer is written with +** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite +** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and +** recommended case) then the integer is always filled with zero, regardless +** if its initial value. ** */ #define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ @@ -2173,6 +2191,7 @@ struct sqlite3_mem_methods { #define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */ #define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */ #define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */ +#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */ /* ** CAPI3REF: Database Connection Configuration Options @@ -3287,8 +3306,8 @@ SQLITE_API int sqlite3_set_authorizer( #define SQLITE_RECURSIVE 33 /* NULL NULL */ /* -** CAPI3REF: Tracing And Profiling Functions -** METHOD: sqlite3 +** CAPI3REF: Deprecated Tracing And Profiling Functions +** DEPRECATED ** ** These routines are deprecated. Use the [sqlite3_trace_v2()] interface ** instead of the routines described here. @@ -6869,6 +6888,12 @@ SQLITE_API int sqlite3_autovacuum_pages( ** The exceptions defined in this paragraph might change in a future ** release of SQLite. ** +** Whether the update hook is invoked before or after the +** corresponding change is currently unspecified and may differ +** depending on the type of change. Do not rely on the order of the +** hook call with regards to the final result of the operation which +** triggers the hook. +** ** The update hook implementation must not do anything that will modify ** the database connection that invoked the update hook. Any actions ** to modify the database connection must be deferred until after the @@ -8339,7 +8364,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** The sqlite3_keyword_count() interface returns the number of distinct ** keywords understood by SQLite. ** -** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and +** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and ** makes *Z point to that keyword expressed as UTF8 and writes the number ** of bytes in the keyword into *L. The string that *Z points to is not ** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns @@ -9918,24 +9943,45 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **
  • ** ^(If the sqlite3_vtab_distinct() interface returns 2, that means ** that the query planner does not need the rows returned in any particular -** order, as long as rows with the same values in all "aOrderBy" columns -** are adjacent.)^ ^(Furthermore, only a single row for each particular -** combination of values in the columns identified by the "aOrderBy" field -** needs to be returned.)^ ^It is always ok for two or more rows with the same -** values in all "aOrderBy" columns to be returned, as long as all such rows -** are adjacent. ^The virtual table may, if it chooses, omit extra rows -** that have the same value for all columns identified by "aOrderBy". -** ^However omitting the extra rows is optional. +** order, as long as rows with the same values in all columns identified +** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows +** contain the same values for all columns identified by "colUsed", all but +** one such row may optionally be omitted from the result.)^ +** The virtual table is not required to omit rows that are duplicates +** over the "colUsed" columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. ** This mode is used for a DISTINCT query. **

  • -** ^(If the sqlite3_vtab_distinct() interface returns 3, that means -** that the query planner needs only distinct rows but it does need the -** rows to be sorted.)^ ^The virtual table implementation is free to omit -** rows that are identical in all aOrderBy columns, if it wants to, but -** it is not required to omit any rows. This mode is used for queries +** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the +** virtual table must return rows in the order defined by "aOrderBy" as +** if the sqlite3_vtab_distinct() interface had returned 0. However if +** two or more rows in the result have the same values for all columns +** identified by "colUsed", then all but one such row may optionally be +** omitted.)^ Like when the return value is 2, the virtual table +** is not required to omit rows that are duplicates over the "colUsed" +** columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. +** This mode is used for queries ** that have both DISTINCT and ORDER BY clauses. ** ** +**

    The following table summarizes the conditions under which the +** virtual table is allowed to set the "orderByConsumed" flag based on +** the value returned by sqlite3_vtab_distinct(). This table is a +** restatement of the previous four paragraphs: +** +** +** +**
sqlite3_vtab_distinct() return value | rows returned in aOrderBy order | rows with same aOrderBy values adjacent | duplicates over colUsed columns may be omitted
+**                                  0 | yes | yes | no
+**                                  1 | no  | yes | no
+**                                  2 | no  | yes | yes
+**                                  3 | yes | yes | yes
+**
    +** ** ^For the purposes of comparing virtual table output values to see if the ** values are same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" @@ -11980,6 +12026,30 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); +/* +** CAPI3REF: Add A Single Change To A Changegroup +** METHOD: sqlite3_changegroup +** +** This function adds the single change currently indicated by the iterator +** passed as the second argument to the changegroup object. The rules for +** adding the change are just as described for [sqlite3changegroup_add()]. +** +** If the change is successfully added to the changegroup, SQLITE_OK is +** returned. Otherwise, an SQLite error code is returned. +** +** The iterator must point to a valid entry when this function is called. +** If it does not, SQLITE_ERROR is returned and no change is added to the +** changegroup. Additionally, the iterator must not have been opened with +** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also +** returned. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup*, + sqlite3_changeset_iter* +); + + + /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup ** METHOD: sqlite3_changegroup @@ -12784,8 +12854,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index 4b3b6cab5..ed2a9e2a3 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -1679,7 +1679,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { } } - // Forgein Keys + // Foreign Keys if foreignKeys > -1 { if err := exec(fmt.Sprintf("PRAGMA foreign_keys = %d;", foreignKeys)); err != nil { C.sqlite3_close_v2(db) diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go index 95cc7c0b6..6ef230862 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go @@ -18,5 +18,6 @@ package sqlite3 #cgo openbsd LDFLAGS: -lsqlite3 #cgo solaris LDFLAGS: -lsqlite3 #cgo windows LDFLAGS: -lsqlite3 +#cgo zos LDFLAGS: -lsqlite3 */ import "C" diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go index de9630c27..76d840164 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go @@ -86,7 +86,7 @@ var ( // combination is incorrect or unknown. // // If the SQLITE_USER table is not present in the database file, then -// this interface is a harmless no-op returnning SQLITE_OK. +// this interface is a harmless no-op returning SQLITE_OK. 
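For context, the Authenticate entry point documented above is only compiled into go-sqlite3 when the sqlite_userauth build tag is set. A minimal, hypothetical sketch of wiring it up through the driver's ConnectHook follows; the driver name and credentials are illustrative, not part of the library:

package main

import (
	"database/sql"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func init() {
	// Authenticate each new connection before database/sql hands it out.
	// Build with: go build -tags sqlite_userauth
	sql.Register("sqlite3_with_auth", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.Authenticate("admin", "secret") // illustrative credentials
		},
	})
}

func main() {
	db, err := sql.Open("sqlite3_with_auth", "file:app.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()
}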
func (c *SQLiteConn) Authenticate(username, password string) error { rv := c.authenticate(username, password) switch rv { diff --git a/vendor/github.com/moby/buildkit/AUTHORS b/vendor/github.com/moby/buildkit/AUTHORS index c1dce6558..7a9966ff2 100644 --- a/vendor/github.com/moby/buildkit/AUTHORS +++ b/vendor/github.com/moby/buildkit/AUTHORS @@ -1,66 +1,348 @@ # This file lists all individuals having contributed content to the repository. -# For how it is generated, see `scripts/generate-authors.sh`. +# For how it is generated, see hack/dockerfiles/authors.Dockerfile. +a-palchikov Aaron L. Xu Aaron Lehmann +Aaron Lehmann +Abdur Rehman +adamperlin +Addam Hardy +Adrian Plata +Adrien Delorme +Ahmon Dancy +Aidan Hobson Sayers Akihiro Suda +Alan Fregtman <941331+darkvertex@users.noreply.github.com> +Alano Terblanche <18033717+Benehiko@users.noreply.github.com> +Aleksa Sarai +Alex Couture-Beil +Alex Mayer +Alex Suraci Alexander Morozov +Alexis Murzeau Alice Frosi Allen Sun +Amen Belayneh +Anca Iordache Anda Xu +Anders F Björklund +Andrea Bolognani +Andrea Luzzardi +Andrew Chang +Andrey Smirnov +Andy Alt +Andy Caldwell +Ankush Agarwal +Anthony Nandaa Anthony Sottile +Anurag Goel +Anusha Ragunathan +Arkadiusz Drabczyk +Arnaldo Garcia Rincon Arnaud Bailly +Artem Khramov +Austin Vazquez +Avi Deitcher +Bastiaan Bakker +Ben Longo +Bertrand Paquet +Billy Owire Bin Liu +Bjorn Neergaard +Brandon Mitchell +Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brian Goff +Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> +Burt Holzman +Ce Gao +Chaerim Yeo +Changwei Ge +Chanhun Jeong +ChaosGramer +Charles Chan +Charles Korn +Charles Law +Chenbin +Chris Goller +Chris McKinnel +Christian Höltje +Christian Weichel +Ciro S. Costa +Claudiu Belu +Colin Chartier +Corey Larson +Cory Bennett +Cory Snider +coryb +Craig Andrews +CrazyMax +Csaba Apagyi +cuiyourong +Dan Duvall +Daniel Cassidy Daniel Nephin +Darren Shepherd Dave Chen +Dave Henderson +Dave Tucker David Calavera +David Dooling +David Gageot +David Karlsson +Davis Schirmer +Debosmit Ray Dennis Chen -Derek McGowan +Dennis Haney +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> +Derek McGowan +Dharmit Shah +Ding Fei +dito Doug Davis -Edgar Lee +Edgar Lee Eli Uriegas +Elias Faxö +Eng Zer Jun +Eric Engestrom +Erik McKelvey +Erik Sipsma +eyherabh f0 +fanjiyun.fjy +Felix Fontein Fernando Miguel +Fiona Klute +Foysal Iqbal +Frank Villaro-Dixon +frankyang +Fred Cox +Frieder Bluemle +Félix Mattrat +Gabriel-Adrian Samfira +Gaetan de Villele +Gahl Saraf +genglu.gl +George +ggjulio +Govind Rai +Grant Reaber +Grégoire Payen de La Garanderie +guangwu +Guilhem Charles +guoguangwu +Hans van den Bogert Hao Hu +Hector S Helen Xie Himanshu Pandey Hiromu Nakamura +HowJMay +Hugo Santos +Höhl, Lukas Ian Campbell +Ian King'ori +Ignas Mikalajūnas +Ilya Dmitrichenko Iskander (Alex) Sharipov +Jacob Gillespie +Jacob MacElroy +Jakub Ciolek +James Carnegie Jean-Pierre Huynh +Jeffrey Huang +Jesper Noordsij +Jesse Rittner Jessica Frazelle +jgeiger +Jitender Kumar +jlecordier +joey John Howard +John Maguire +John Mulhausen +John Tims +Jon Zeolla +Jonathan A. Sternberg +Jonathan Azoff +Jonathan Giannuzzi Jonathan Stoppani +Jonny Stoten +JordanGoasdoue +jroenf +Julian Goede Justas Brazauskas +Justin Chadwell Justin Cormack +Justin Garrison +Jörg Franke <359489+NewJorg@users.noreply.github.com> +Kai Takac +Kang, Matthew +Kazuyoshi Kato +Kees Cook +Kevin Burke +kevinmeredith +Kir Kolyshkin +Kirill A. 
Korinsky +Kohei Tokunaga +Koichi Shiraishi +Kris-Mikael Krister Kunal Kushwaha +Kyle +l00397676 Lajos Papp +lalyos +Leandro Santiago +Levi Harrison +liulanzheng +liwenqi +lixiaobing10051267 +lomot +Lu Jingxiao +Luca Visentin +Maciej Kalisz +Madhav Puri +Manu Gupta +Marat Radchenko +Marcus Comstedt +Mark Gordon +Mark Yen +Marko Kohtala +Mary Anthony +masibw +Matias Insaurralde +Matt Kang Matt Rickard +Maxime Lagresle Michael Crosby +Michael Friis +Michael Irwin +Miguel Ángel Jimeno +Mihai Borobocea +Mike Brown +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milas Bowman +Misty Stanley-Jones +Mitsuru Kariya Miyachi Katsuya +Morgan Bauer +Moritz "WanzenBug" Wanzenböck +Morlay +msg Nao YONASHIRO Natasha Jarus +Nathan Sullivan +Nguyễn Đức Chiến +Nick Miyake +Nick Santos +Nikhil Pandeti +njucjc +Nobi Noel Georgi <18496730+frezbo@users.noreply.github.com> +Oliver Bristow +omahs <73983677+omahs@users.noreply.github.com> +Omer Duchovne <79370724+od-cyera@users.noreply.github.com> +Omer Mizrahi Ondrej Fabry +Otto Kekäläinen +Pablo Chico de Guzman +Patrick Hemmer +Patrick Lang Patrick Van Stee +Paul "TBBle" Hampson +Paweł Gronowski +Peter Dave Hello +Petr Fedchenkov +Petteri Räty +Phil Estes +Pierre Fenoll +pieterdd +Pranav Pandit +Pratik Raj +Prayag Verma +Qiang Huang +racequite +Remy Suen +Reshen +retornam Ri Xu +Rob Taylor +Robert Estelle +Rubens Figueiredo +Salim B +Sam Whited +Sascha Hemleb +Sascha Schwarze +Sean P. Kane Sebastiaan van Stijn +Seiya Miyata +Serhat Gülçiçek +Sertac Ozercan +Shaun Thompson Shev Yan +Shijiang Wei +Shingo Omura +Shiwei Zhang +Siebe Schaap +Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> Simon Ferquel +Slava Semushin +Solomon Hykes +squeegels <1674195+squeegels@users.noreply.github.com> +Stefan Scherer Stefan Weil +StefanSchoof +Stepan Blyshchak +Stephen Day +Steve Lohr +sunchunming +Sven Dowideit +Swagat Bora +Takuya Noguchi +Talon Bowler Thomas Leonard +Thomas Riccardi Thomas Shaw +Tianon Gravi Tibor Vass Tiffany Jernigan +Tim Waugh +Tim Wraight Tino Rusch Tobias Klauser Tomas Tomecek +Tomasz Kopczynski Tomohiro Kusumoto +Tristan Stenner +Troels Liebe Bentsen Tõnis Tiigi +Valentin Lorentz +Vasek - Tom C +Victor Vieux +Victoria Bialas Vincent Demeester +Vlad A. Ionescu +Vladislav Ivanov +Wang Yumu <37442693@qq.com> Wei Fu +Wei Zhang +wingkwong +x893675 +Xiaofan Zhang +Ximo Guanter +Yamazaki Masashi +Yan Song Yong Tang Yuichiro Kaneko +yumemio <59369226+yumemio@users.noreply.github.com> +Yurii Rashkovskii +yzewei +Zach Badgett +zhangwenlong +Zhizhen He Ziv Tsarfati +岁丰 +沈陵 +蝦米 郑泽宇 diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go index db1668f25..651b13148 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go @@ -13,12 +13,14 @@ import ( const ( keySyntax = "syntax" + keyCheck = "check" keyEscape = "escape" ) var validDirectives = map[string]struct{}{ keySyntax: {}, keyEscape: {}, + keyCheck: {}, } type Directive struct { @@ -110,6 +112,10 @@ func (d *DirectiveParser) ParseAll(data []byte) ([]*Directive, error) { // This allows for a flexible range of input formats, and appropriate syntax // selection. 
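Since the generalized ParseDirective below now backs DetectSyntax, a rough usage sketch may help; the Dockerfile content and the expected output in the comments are illustrative, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/parser"
)

func main() {
	df := []byte("# syntax=docker/dockerfile:1\n# check=skip=all\nFROM scratch\n")

	// DetectSyntax is now a thin wrapper over ParseDirective with the "syntax" key.
	syntax, cmdline, _, ok := parser.DetectSyntax(df)
	fmt.Println(syntax, cmdline, ok) // docker/dockerfile:1 docker/dockerfile:1 true

	// The same machinery resolves the newly recognized "check" directive.
	value, _, _, found := parser.ParseDirective("check", df)
	fmt.Println(value, found) // skip=all true
}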
func DetectSyntax(dt []byte) (string, string, []Range, bool) { + return ParseDirective(keySyntax, dt) +} + +func ParseDirective(key string, dt []byte) (string, string, []Range, bool) { dt, hadShebang, err := discardShebang(dt) if err != nil { return "", "", nil, false @@ -119,42 +125,38 @@ func DetectSyntax(dt []byte) (string, string, []Range, bool) { line++ } - // use default directive parser, and search for #syntax= + // use default directive parser, and search for #key= directiveParser := DirectiveParser{line: line} - if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + if syntax, cmdline, loc, ok := detectDirectiveFromParser(key, dt, directiveParser); ok { return syntax, cmdline, loc, true } - // use directive with different comment prefix, and search for //syntax= + // use directive with different comment prefix, and search for //key= directiveParser = DirectiveParser{line: line} directiveParser.setComment("//") - if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + if syntax, cmdline, loc, ok := detectDirectiveFromParser(key, dt, directiveParser); ok { return syntax, cmdline, loc, true } - // search for possible json directives - var directive struct { - Syntax string `json:"syntax"` - } - if err := json.Unmarshal(dt, &directive); err == nil { - if directive.Syntax != "" { + // use json directive, and search for { "key": "..." } + jsonDirective := map[string]string{} + if err := json.Unmarshal(dt, &jsonDirective); err == nil { + if v, ok := jsonDirective[key]; ok { loc := []Range{{ Start: Position{Line: line}, End: Position{Line: line}, }} - return directive.Syntax, directive.Syntax, loc, true + return v, v, loc, true } } return "", "", nil, false } -func detectSyntaxFromParser(dt []byte, parser DirectiveParser) (string, string, []Range, bool) { +func detectDirectiveFromParser(key string, dt []byte, parser DirectiveParser) (string, string, []Range, bool) { directives, _ := parser.ParseAll(dt) for _, d := range directives { - // check for syntax directive before erroring out, since the error - // might have occurred *after* the syntax directive - if d.Name == keySyntax { + if d.Name == key { p, _, _ := strings.Cut(d.Value, " ") return p, d.Value, d.Location, true } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go index 9f28a5a2e..08a40c5d4 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go @@ -7,7 +7,7 @@ import ( // ErrorLocation gives a location in source code that caused the error type ErrorLocation struct { - Location []Range + Locations [][]Range error } @@ -34,16 +34,29 @@ func withLocation(err error, start, end int) error { // WithLocation extends an error with a source code location func WithLocation(err error, location []Range) error { + return setLocation(err, location, true) +} + +func SetLocation(err error, location []Range) error { + return setLocation(err, location, false) +} + +func setLocation(err error, location []Range, add bool) error { if err == nil { return nil } var el *ErrorLocation if errors.As(err, &el) { + if add { + el.Locations = append(el.Locations, location) + } else { + el.Locations = [][]Range{location} + } return err } return stack.Enable(&ErrorLocation{ - error: err, - Location: location, + error: err, + Locations: [][]Range{location}, }) } diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go index db8d0bda2..f8d891c71 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go @@ -17,6 +17,7 @@ import ( var ( errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only") + errDockerfileNotJSONArray = errors.New("not a JSON array") ) const ( @@ -58,11 +59,11 @@ func parseWords(rest string, d *directives) []string { words := []string{} phase := inSpaces - word := "" quote := '\000' blankOK := false var ch rune var chWidth int + var sbuilder strings.Builder for pos := 0; pos <= len(rest); pos += chWidth { if pos != len(rest) { @@ -79,18 +80,18 @@ func parseWords(rest string, d *directives) []string { phase = inWord // found it, fall through } if (phase == inWord || phase == inQuote) && (pos == len(rest)) { - if blankOK || len(word) > 0 { - words = append(words, word) + if blankOK || sbuilder.Len() > 0 { + words = append(words, sbuilder.String()) } break } if phase == inWord { if unicode.IsSpace(ch) { phase = inSpaces - if blankOK || len(word) > 0 { - words = append(words, word) + if blankOK || sbuilder.Len() > 0 { + words = append(words, sbuilder.String()) } - word = "" + sbuilder.Reset() blankOK = false continue } @@ -106,11 +107,11 @@ func parseWords(rest string, d *directives) []string { // If we're not quoted and we see an escape token, then always just // add the escape token plus the char to the word, even if the char // is a quote. - word += string(ch) + sbuilder.WriteRune(ch) pos += chWidth ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) } - word += string(ch) + sbuilder.WriteRune(ch) continue } if phase == inQuote { @@ -124,10 +125,10 @@ func parseWords(rest string, d *directives) []string { continue // just skip the escape token at end } pos += chWidth - word += string(ch) + sbuilder.WriteRune(ch) ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) } - word += string(ch) + sbuilder.WriteRune(ch) } } @@ -154,7 +155,7 @@ func parseNameVal(rest string, key string, d *directives) (*Node, error) { if len(parts) < 2 { return nil, errors.Errorf("%s must have two arguments", key) } - return newKeyValueNode(parts[0], parts[1]), nil + return newKeyValueNode(parts[0], parts[1], ""), nil } var rootNode *Node @@ -165,17 +166,20 @@ func parseNameVal(rest string, key string, d *directives) (*Node, error) { } parts := strings.SplitN(word, "=", 2) - node := newKeyValueNode(parts[0], parts[1]) + node := newKeyValueNode(parts[0], parts[1], "=") rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) } return rootNode, nil } -func newKeyValueNode(key, value string) *Node { +func newKeyValueNode(key, value, sep string) *Node { return &Node{ Value: key, - Next: &Node{Value: value}, + Next: &Node{ + Value: value, + Next: &Node{Value: sep}, + }, } } @@ -187,7 +191,9 @@ func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) { prevNode.Next = node } - prevNode = node.Next + for prevNode = node.Next; prevNode.Next != nil; { + prevNode = prevNode.Next + } return rootNode, prevNode } @@ -269,14 +275,14 @@ func parseString(rest string, d *directives) (*Node, map[string]bool, error) { } // parseJSON converts JSON arrays to an AST. 
-func parseJSON(rest string, d *directives) (*Node, map[string]bool, error) { +func parseJSON(rest string) (*Node, map[string]bool, error) { rest = strings.TrimLeftFunc(rest, unicode.IsSpace) if !strings.HasPrefix(rest, "[") { - return nil, nil, errors.Errorf("Error parsing %q as a JSON array", rest) + return nil, nil, errDockerfileNotJSONArray } var myJSON []interface{} - if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + if err := json.Unmarshal([]byte(rest), &myJSON); err != nil { return nil, nil, err } @@ -307,7 +313,7 @@ func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error) return nil, nil, nil } - node, attrs, err := parseJSON(rest, d) + node, attrs, err := parseJSON(rest) if err == nil { return node, attrs, nil @@ -325,7 +331,7 @@ func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error) // so, passes to parseJSON; if not, attempts to parse it as a whitespace // delimited string. func parseMaybeJSONToList(rest string, d *directives) (*Node, map[string]bool, error) { - node, attrs, err := parseJSON(rest, d) + node, attrs, err := parseJSON(rest) if err == nil { return node, attrs, nil diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index 4a6129fdc..740f03b70 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -114,7 +114,6 @@ type Heredoc struct { var ( dispatch map[string]func(string, *directives) (*Node, map[string]bool, error) reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - reComment = regexp.MustCompile(`^#.*$`) reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`) reLeadingTabs = regexp.MustCompile(`(?m)^\t+`) ) @@ -168,16 +167,17 @@ func (d *directives) setEscapeToken(s string) error { // possibleParserDirective looks for parser directives, eg '# escapeToken='. // Parser directives must precede any builder instruction or other comments, -// and cannot be repeated. -func (d *directives) possibleParserDirective(line string) error { - directive, err := d.parser.ParseLine([]byte(line)) +// and cannot be repeated. Returns true if a parser directive was found. +func (d *directives) possibleParserDirective(line []byte) (bool, error) { + directive, err := d.parser.ParseLine(line) if err != nil { - return err + return false, err } if directive != nil && directive.Name == keyEscape { - return d.setEscapeToken(directive.Value) + err := d.setEscapeToken(directive.Value) + return err == nil, err } - return nil + return directive != nil, nil } // newDefaultDirectives returns a new directives structure with the default escapeToken token @@ -284,6 +284,7 @@ func Parse(rwc io.Reader) (*Result, error) { scanner.Split(scanLines) warnings := []Warning{} var comments []string + buf := &bytes.Buffer{} var err error for scanner.Scan() { @@ -300,21 +301,29 @@ func Parse(rwc io.Reader) (*Result, error) { comments = append(comments, comment) } } - bytesRead, err = processLine(d, bytesRead, true) + var directiveOk bool + bytesRead, directiveOk, err = processLine(d, bytesRead, true) + // If the line is a directive, strip it from the comments + // so it doesn't get added to the AST. 
+ if directiveOk { + comments = comments[:len(comments)-1] + } if err != nil { return nil, withLocation(err, currentLine, 0) } currentLine++ startLine := currentLine - line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d) - if isEndOfLine && line == "" { + bytesRead, isEndOfLine := trimContinuationCharacter(bytesRead, d) + if isEndOfLine && len(bytesRead) == 0 { continue } + buf.Reset() + buf.Write(bytesRead) var hasEmptyContinuationLine bool for !isEndOfLine && scanner.Scan() { - bytesRead, err := processLine(d, scanner.Bytes(), false) + bytesRead, _, err := processLine(d, scanner.Bytes(), false) if err != nil { return nil, withLocation(err, currentLine, 0) } @@ -329,16 +338,17 @@ func Parse(rwc io.Reader) (*Result, error) { continue } - continuationLine := string(bytesRead) - continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d) - line += continuationLine + bytesRead, isEndOfLine = trimContinuationCharacter(bytesRead, d) + buf.Write(bytesRead) } + line := buf.String() + if hasEmptyContinuationLine { warnings = append(warnings, Warning{ Short: "Empty continuation line found in: " + line, Detail: [][]byte{[]byte("Empty continuation lines will become errors in a future release")}, - URL: "https://github.com/moby/moby/pull/33719", + URL: "https://docs.docker.com/go/dockerfile/rule/no-empty-continuation/", Location: &Range{Start: Position{Line: currentLine}, End: Position{Line: currentLine}}, }) } @@ -348,7 +358,7 @@ func Parse(rwc io.Reader) (*Result, error) { return nil, withLocation(err, startLine, currentLine) } - if child.canContainHeredoc() { + if child.canContainHeredoc() && strings.Contains(line, "<<") { heredocs, err := heredocsFromLine(line) if err != nil { return nil, withLocation(err, startLine, currentLine) @@ -415,7 +425,7 @@ func heredocFromMatch(match []string) (*Heredoc, error) { // If there are quotes in one but not the other, then we know that some // part of the heredoc word is quoted, so we shouldn't expand the content. 
shlex.RawQuotes = false - words, err := shlex.ProcessWords(rest, []string{}) + words, err := shlex.ProcessWords(rest, emptyEnvs{}) if err != nil { return nil, err } @@ -425,7 +435,7 @@ func heredocFromMatch(match []string) (*Heredoc, error) { } shlex.RawQuotes = true - wordsRaw, err := shlex.ProcessWords(rest, []string{}) + wordsRaw, err := shlex.ProcessWords(rest, emptyEnvs{}) if err != nil { return nil, err } @@ -466,7 +476,7 @@ func heredocsFromLine(line string) ([]Heredoc, error) { shlex.RawQuotes = true shlex.RawEscapes = true shlex.SkipUnsetEnv = true - words, _ := shlex.ProcessWords(line, []string{}) + words, _ := shlex.ProcessWords(line, emptyEnvs{}) var docs []Heredoc for _, word := range words { @@ -487,7 +497,10 @@ func ChompHeredocContent(src string) string { } func trimComments(src []byte) []byte { - return reComment.ReplaceAll(src, []byte{}) + if !isComment(src) { + return src + } + return nil } func trimLeadingWhitespace(src []byte) []byte { @@ -501,7 +514,8 @@ func trimNewline(src []byte) []byte { } func isComment(line []byte) bool { - return reComment.Match(trimLeadingWhitespace(trimNewline(line))) + line = trimLeadingWhitespace(line) + return len(line) > 0 && line[0] == '#' } func isEmptyContinuationLine(line []byte) bool { @@ -510,9 +524,9 @@ func isEmptyContinuationLine(line []byte) bool { var utf8bom = []byte{0xEF, 0xBB, 0xBF} -func trimContinuationCharacter(line string, d *directives) (string, bool) { - if d.lineContinuationRegex.MatchString(line) { - line = d.lineContinuationRegex.ReplaceAllString(line, "$1") +func trimContinuationCharacter(line []byte, d *directives) ([]byte, bool) { + if d.lineContinuationRegex.Match(line) { + line = d.lineContinuationRegex.ReplaceAll(line, []byte("$1")) return line, false } return line, true @@ -520,12 +534,13 @@ func trimContinuationCharacter(line string, d *directives) (string, bool) { // TODO: remove stripLeftWhitespace after deprecation period. It seems silly // to preserve whitespace on continuation lines. Why is that done? 
-func processLine(d *directives, token []byte, stripLeftWhitespace bool) ([]byte, error) { +func processLine(d *directives, token []byte, stripLeftWhitespace bool) ([]byte, bool, error) { token = trimNewline(token) if stripLeftWhitespace { token = trimLeadingWhitespace(token) } - return trimComments(token), d.possibleParserDirective(string(token)) + directiveOk, err := d.possibleParserDirective(token) + return trimComments(token), directiveOk, err } // Variation of bufio.ScanLines that preserves the line endings @@ -550,3 +565,13 @@ func handleScannerError(err error) error { return err } } + +type emptyEnvs struct{} + +func (emptyEnvs) Get(string) (string, bool) { + return "", false +} + +func (emptyEnvs) Keys() []string { + return nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go index c0261652f..d1c87522e 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go @@ -36,7 +36,7 @@ func extractBuilderFlags(line string) (string, []string, error) { words := []string{} phase := inSpaces - word := "" + sbuilder := &strings.Builder{} quote := '\000' blankOK := false var ch rune @@ -62,13 +62,14 @@ func extractBuilderFlags(line string) (string, []string, error) { phase = inWord // found something with "--", fall through } if (phase == inWord || phase == inQuote) && (pos == len(line)) { - if word != "--" && (blankOK || len(word) > 0) { + if word := sbuilder.String(); word != "--" && (blankOK || len(word) > 0) { words = append(words, word) } break } if phase == inWord { if unicode.IsSpace(ch) { + word := sbuilder.String() phase = inSpaces if word == "--" { return line[pos:], words, nil @@ -76,7 +77,7 @@ func extractBuilderFlags(line string) (string, []string, error) { if blankOK || len(word) > 0 { words = append(words, word) } - word = "" + sbuilder.Reset() blankOK = false continue } @@ -93,7 +94,9 @@ func extractBuilderFlags(line string) (string, []string, error) { pos++ ch = rune(line[pos]) } - word += string(ch) + if _, err := sbuilder.WriteRune(ch); err != nil { + return "", nil, err + } continue } if phase == inQuote { @@ -109,7 +112,9 @@ func extractBuilderFlags(line string) (string, []string, error) { pos++ ch = rune(line[pos]) } - word += string(ch) + if _, err := sbuilder.WriteRune(ch); err != nil { + return "", nil, err + } } } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go index f9aca5d9e..ec0c8fc74 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go @@ -9,3 +9,10 @@ package shell func EqualEnvKeys(from, to string) bool { return from == to } + +// NormalizeEnvKey returns the key in a normalized form that can be used +// for comparison. On Unix this is a no-op. On Windows this converts the +// key to uppercase. 
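A small sketch of how a caller might use the new helper: code that keys an environment map can normalize once at insertion so later lookups match the platform's shell semantics. The sample values are made up for illustration:

package main

import (
	"fmt"
	"strings"

	"github.com/moby/buildkit/frontend/dockerfile/shell"
)

func main() {
	envs := map[string]string{}
	for _, kv := range []string{"Path=/usr/local/bin", "PATH=/usr/bin"} {
		k, v, _ := strings.Cut(kv, "=")
		// On Windows the two keys collapse into a single "PATH" entry;
		// on Unix NormalizeEnvKey is a no-op and both entries survive.
		envs[shell.NormalizeEnvKey(k)] = v
	}
	fmt.Println(envs)
}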
+func NormalizeEnvKey(key string) string { + return key +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go index 7bbed9b20..02eedcc49 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go @@ -8,3 +8,10 @@ import "strings" func EqualEnvKeys(from, to string) bool { return strings.EqualFold(from, to) } + +// NormalizeEnvKey returns the key in a normalized form that can be used +// for comparison. On Unix this is a no-op. On Windows this converts the +// key to uppercase. +func NormalizeEnvKey(key string) string { + return strings.ToUpper(key) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index 80806f8ba..18da29334 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -3,6 +3,8 @@ package shell import ( "bytes" "fmt" + "regexp" + "slices" "strings" "text/scanner" "unicode" @@ -10,6 +12,11 @@ import ( "github.com/pkg/errors" ) +type EnvGetter interface { + Get(string) (string, bool) + Keys() []string +} + // Lex performs shell word splitting and variable expansion. // // Lex takes a string and an array of env variables and @@ -17,12 +24,15 @@ import ( // tokens. Tries to mimic bash shell process. // It doesn't support all flavors of ${xx:...} formats but new ones can // be added by adding code to the "special ${} format processing" section +// +// It is not safe to call methods on a Lex instance concurrently. type Lex struct { escapeToken rune RawQuotes bool RawEscapes bool SkipProcessQuotes bool SkipUnsetEnv bool + shellWord shellWord } // NewLex creates a new Lex which uses escapeToken to escape quotes. @@ -31,10 +41,13 @@ func NewLex(escapeToken rune) *Lex { } // ProcessWord will use the 'env' list of environment variables, -// and replace any env var references in 'word'. -func (s *Lex) ProcessWord(word string, env []string) (string, error) { - word, _, err := s.process(word, BuildEnvs(env)) - return word, err +// and replace any env var references in 'word'. It will also +// return variables in word which were not found in the 'env' list, +// which is useful in later linting. +// TODO: rename +func (s *Lex) ProcessWord(word string, env EnvGetter) (string, map[string]struct{}, error) { + result, err := s.process(word, env, true) + return result.Result, result.Unmatched, err } // ProcessWords will use the 'env' list of environment variables, @@ -44,63 +57,62 @@ func (s *Lex) ProcessWord(word string, env []string) (string, error) { // this splitting is done **after** the env var substitutions are done. // Note, each one is trimmed to remove leading and trailing spaces (unless // they are quoted", but ProcessWord retains spaces between words. -func (s *Lex) ProcessWords(word string, env []string) ([]string, error) { - _, words, err := s.process(word, BuildEnvs(env)) - return words, err +func (s *Lex) ProcessWords(word string, env EnvGetter) ([]string, error) { + result, err := s.process(word, env, false) + return result.Words, err } -// ProcessWordWithMap will use the 'env' list of environment variables, -// and replace any env var references in 'word'. 
-func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, error) { - word, _, err := s.process(word, env) - return word, err +type ProcessWordResult struct { + Result string + Words []string + Matched map[string]struct{} + Unmatched map[string]struct{} } // ProcessWordWithMatches will use the 'env' list of environment variables, // replace any env var references in 'word' and return the env that were used. -func (s *Lex) ProcessWordWithMatches(word string, env map[string]string) (string, map[string]struct{}, error) { - sw := s.init(word, env) - word, _, err := sw.process(word) - return word, sw.matches, err -} - -func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) { - _, words, err := s.process(word, env) - return words, err -} - -func (s *Lex) init(word string, env map[string]string) *shellWord { - sw := &shellWord{ - envs: env, - escapeToken: s.escapeToken, - skipUnsetEnv: s.SkipUnsetEnv, - skipProcessQuotes: s.SkipProcessQuotes, - rawQuotes: s.RawQuotes, - rawEscapes: s.RawEscapes, - matches: make(map[string]struct{}), +func (s *Lex) ProcessWordWithMatches(word string, env EnvGetter) (ProcessWordResult, error) { + return s.process(word, env, true) +} + +func (s *Lex) initWord(word string, env EnvGetter, capture bool) *shellWord { + sw := &s.shellWord + sw.Lex = s + sw.envs = env + sw.capture = capture + sw.rawEscapes = s.RawEscapes + if capture { + sw.matches = nil + sw.nonmatches = nil } sw.scanner.Init(strings.NewReader(word)) return sw } -func (s *Lex) process(word string, env map[string]string) (string, []string, error) { - sw := s.init(word, env) - return sw.process(word) +func (s *Lex) process(word string, env EnvGetter, capture bool) (ProcessWordResult, error) { + sw := s.initWord(word, env, capture) + word, words, err := sw.process(word) + return ProcessWordResult{ + Result: word, + Words: words, + Matched: sw.matches, + Unmatched: sw.nonmatches, + }, err } type shellWord struct { - scanner scanner.Scanner - envs map[string]string - escapeToken rune - rawQuotes bool - rawEscapes bool - skipUnsetEnv bool - skipProcessQuotes bool - matches map[string]struct{} + *Lex + wordsBuffer strings.Builder + scanner scanner.Scanner + envs EnvGetter + rawEscapes bool + capture bool // capture matches and nonmatches + matches map[string]struct{} + nonmatches map[string]struct{} } func (sw *shellWord) process(source string) (string, []string, error) { - word, words, err := sw.processStopOn(scanner.EOF) + word, words, err := sw.processStopOn(scanner.EOF, sw.rawEscapes) if err != nil { err = errors.Wrapf(err, "failed to process %q", source) } @@ -108,16 +120,16 @@ func (sw *shellWord) process(source string) (string, []string, error) { } type wordsStruct struct { - word string + buf *strings.Builder words []string inWord bool } func (w *wordsStruct) addChar(ch rune) { if unicode.IsSpace(ch) && w.inWord { - if len(w.word) != 0 { - w.words = append(w.words, w.word) - w.word = "" + if w.buf.Len() != 0 { + w.words = append(w.words, w.buf.String()) + w.buf.Reset() w.inWord = false } } else if !unicode.IsSpace(ch) { @@ -126,7 +138,7 @@ func (w *wordsStruct) addChar(ch rune) { } func (w *wordsStruct) addRawChar(ch rune) { - w.word += string(ch) + w.buf.WriteRune(ch) w.inWord = true } @@ -137,16 +149,16 @@ func (w *wordsStruct) addString(str string) { } func (w *wordsStruct) addRawString(str string) { - w.word += str + w.buf.WriteString(str) w.inWord = true } func (w *wordsStruct) getWords() []string { - if len(w.word) > 0 { - w.words = 
append(w.words, w.word) + if w.buf.Len() > 0 { + w.words = append(w.words, w.buf.String()) // Just in case we're called again by mistake - w.word = "" + w.buf.Reset() w.inWord = false } return w.words @@ -154,18 +166,31 @@ func (w *wordsStruct) getWords() []string { // Process the word, starting at 'pos', and stop when we get to the // end of the word or the 'stopChar' character -func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { - var result bytes.Buffer +func (sw *shellWord) processStopOn(stopChar rune, rawEscapes bool) (string, []string, error) { + // result buffer can't be currently shared for shellWord as it is called internally + // by processDollar + var result strings.Builder + sw.wordsBuffer.Reset() var words wordsStruct + words.buf = &sw.wordsBuffer + // no need to initialize all the time var charFuncMapping = map[rune]func() (string, error){ '$': sw.processDollar, } - if !sw.skipProcessQuotes { + if !sw.SkipProcessQuotes { charFuncMapping['\''] = sw.processSingleQuote charFuncMapping['"'] = sw.processDoubleQuote } + // temporarily set sw.rawEscapes if needed + if rawEscapes != sw.rawEscapes { + sw.rawEscapes = rawEscapes + defer func() { + sw.rawEscapes = !rawEscapes + }() + } + for sw.scanner.Peek() != scanner.EOF { ch := sw.scanner.Peek() @@ -230,7 +255,7 @@ func (sw *shellWord) processSingleQuote() (string, error) { var result bytes.Buffer ch := sw.scanner.Next() - if sw.rawQuotes { + if sw.RawQuotes { result.WriteRune(ch) } @@ -240,7 +265,7 @@ func (sw *shellWord) processSingleQuote() (string, error) { case scanner.EOF: return "", errors.New("unexpected end of statement while looking for matching single-quote") case '\'': - if sw.rawQuotes { + if sw.RawQuotes { result.WriteRune(ch) } return result.String(), nil @@ -265,7 +290,7 @@ func (sw *shellWord) processDoubleQuote() (string, error) { var result bytes.Buffer ch := sw.scanner.Next() - if sw.rawQuotes { + if sw.RawQuotes { result.WriteRune(ch) } @@ -275,7 +300,7 @@ func (sw *shellWord) processDoubleQuote() (string, error) { return "", errors.New("unexpected end of statement while looking for matching double-quote") case '"': ch := sw.scanner.Next() - if sw.rawQuotes { + if sw.RawQuotes { result.WriteRune(ch) } return result.String(), nil @@ -319,7 +344,7 @@ func (sw *shellWord) processDollar() (string, error) { return "$", nil } value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { + if !found && sw.SkipUnsetEnv { return "$" + name, nil } return value, nil @@ -342,7 +367,7 @@ func (sw *shellWord) processDollar() (string, error) { case '}': // Normal ${xx} case value, set := sw.getEnv(name) - if !set && sw.skipUnsetEnv { + if !set && sw.SkipUnsetEnv { return fmt.Sprintf("${%s}", name), nil } return value, nil @@ -351,8 +376,12 @@ func (sw *shellWord) processDollar() (string, error) { ch = sw.scanner.Next() chs += string(ch) fallthrough - case '+', '-', '?': - word, _, err := sw.processStopOn('}') + case '+', '-', '?', '#', '%': + rawEscapes := ch == '#' || ch == '%' + if nullIsUnset && rawEscapes { + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) + } + word, _, err := sw.processStopOn('}', rawEscapes) if err != nil { if sw.scanner.Peek() == scanner.EOF { return "", errors.New("syntax error: missing '}'") @@ -363,7 +392,7 @@ func (sw *shellWord) processDollar() (string, error) { // Grab the current value of the variable in question so we // can use it to determine what to do based on the modifier value, set := sw.getEnv(name) - if sw.skipUnsetEnv && !set { + 
if sw.SkipUnsetEnv && !set { return fmt.Sprintf("${%s%s%s}", name, chs, word), nil } @@ -394,9 +423,61 @@ func (sw *shellWord) processDollar() (string, error) { return "", errors.Errorf("%s: %s", name, message) } return value, nil + case '%', '#': + // %/# matches the shortest pattern expansion, %%/## the longest + greedy := false + + if len(word) > 0 && word[0] == byte(ch) { + greedy = true + word = word[1:] + } + + if ch == '%' { + return trimSuffix(word, value, greedy) + } + return trimPrefix(word, value, greedy) default: return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } + case '/': + replaceAll := sw.scanner.Peek() == '/' + if replaceAll { + sw.scanner.Next() + } + + pattern, _, err := sw.processStopOn('/', true) + if err != nil { + if sw.scanner.Peek() == scanner.EOF { + return "", errors.New("syntax error: missing '/' in ${}") + } + return "", err + } + + replacement, _, err := sw.processStopOn('}', true) + if err != nil { + if sw.scanner.Peek() == scanner.EOF { + return "", errors.New("syntax error: missing '}'") + } + return "", err + } + + value, set := sw.getEnv(name) + if sw.SkipUnsetEnv && !set { + return fmt.Sprintf("${%s/%s/%s}", name, pattern, replacement), nil + } + + re, err := convertShellPatternToRegex(pattern, true, false) + if err != nil { + return "", errors.Errorf("invalid pattern (%s) in substitution: %s", pattern, err) + } + if replaceAll { + value = re.ReplaceAllString(value, replacement) + } else { + if idx := re.FindStringIndex(value); idx != nil { + value = value[0:idx[0]] + replacement + value[idx[1]:] + } + } + return value, nil default: return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } @@ -444,31 +525,155 @@ func isSpecialParam(char rune) bool { } func (sw *shellWord) getEnv(name string) (string, bool) { - for key, value := range sw.envs { - if EqualEnvKeys(name, key) { + v, ok := sw.envs.Get(name) + if ok { + if sw.capture { + if sw.matches == nil { + sw.matches = make(map[string]struct{}) + } sw.matches[name] = struct{}{} - return value, true } + return v, true + } + if sw.capture { + if sw.nonmatches == nil { + sw.nonmatches = make(map[string]struct{}) + } + sw.nonmatches[name] = struct{}{} } return "", false } -func BuildEnvs(env []string) map[string]string { +func EnvsFromSlice(env []string) EnvGetter { envs := map[string]string{} - + keys := make([]string, 0, len(env)) for _, e := range env { - i := strings.Index(e, "=") + k, v, _ := strings.Cut(e, "=") + keys = append(keys, k) + envs[NormalizeEnvKey(k)] = v + } + return &envGetter{env: envs, keys: keys} +} - if i < 0 { - envs[e] = "" - } else { - k := e[:i] - v := e[i+1:] +type envGetter struct { + env map[string]string + keys []string +} + +var _ EnvGetter = &envGetter{} + +func (e *envGetter) Get(key string) (string, bool) { + key = NormalizeEnvKey(key) + v, ok := e.env[key] + return v, ok +} + +func (e *envGetter) Keys() []string { + return e.keys +} + +// convertShellPatternToRegex converts a shell-like wildcard pattern +// (? is a single char, * either the shortest or longest (greedy) string) +// to an equivalent regular expression. 
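The '#', '%', and '/' modifier cases added in this hunk bring the lexer closer to POSIX parameter expansion. A hedged sketch of the resulting behavior (same assumed vendored package as above; expected outputs are in the comments):

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/shell"
)

func main() {
	lex := shell.NewLex('\\')
	env := shell.EnvsFromSlice([]string{"FILE=docs/readme.md"})

	for _, w := range []string{
		"${FILE#docs/}",        // strip shortest matching prefix: "readme.md"
		"${FILE%.md}",          // strip shortest matching suffix: "docs/readme"
		"${FILE/readme/index}", // replace first match: "docs/index.md"
	} {
		out, _, err := lex.ProcessWord(w, env)
		if err != nil {
			panic(err)
		}
		fmt.Println(out)
	}
}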
+// +// Based on +// https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_13 +// but without the bracket expressions (`[]`) +func convertShellPatternToRegex(pattern string, greedy bool, anchored bool) (*regexp.Regexp, error) { + var s scanner.Scanner + s.Init(strings.NewReader(pattern)) + var out strings.Builder + out.Grow(len(pattern) + 4) + + // match only at the beginning of the string + if anchored { + out.WriteByte('^') + } + + // default: non-greedy wildcards + starPattern := ".*?" + if greedy { + starPattern = ".*" + } - // overwrite value if key already exists - envs[k] = v + for tok := s.Next(); tok != scanner.EOF; tok = s.Next() { + switch tok { + case '*': + out.WriteString(starPattern) + continue + case '?': + out.WriteByte('.') + continue + case '\\': + // } and / as part of ${} need to be escaped, but the escape isn't part + // of the pattern + if s.Peek() == '}' || s.Peek() == '/' { + continue + } + out.WriteRune('\\') + tok = s.Next() + if tok != '*' && tok != '?' && tok != '\\' { + return nil, errors.Errorf("invalid escape '\\%c'", tok) + } + // regex characters that need to be escaped + // escaping closing is optional, but done for consistency + case '[', ']', '{', '}', '.', '+', '(', ')', '|', '^', '$': + out.WriteByte('\\') } + out.WriteRune(tok) + } + return regexp.Compile(out.String()) +} + +func trimPrefix(word, value string, greedy bool) (string, error) { + re, err := convertShellPatternToRegex(word, greedy, true) + if err != nil { + return "", errors.Errorf("invalid pattern (%s) in substitution: %s", word, err) } - return envs + if idx := re.FindStringIndex(value); idx != nil { + value = value[idx[1]:] + } + return value, nil +} + +// reverse without avoid reversing escapes, i.e. a\*c -> c\*a +func reversePattern(pattern string) string { + patternRunes := []rune(pattern) + out := make([]rune, len(patternRunes)) + lastIdx := len(patternRunes) - 1 + for i := 0; i <= lastIdx; { + tok := patternRunes[i] + outIdx := lastIdx - i + if tok == '\\' && i != lastIdx { + out[outIdx-1] = tok + // the pattern is taken from a ${var#pattern}, so the last + // character can't be an escape character + out[outIdx] = patternRunes[i+1] + i += 2 + } else { + out[outIdx] = tok + i++ + } + } + return string(out) +} + +func reverseString(str string) string { + out := []rune(str) + slices.Reverse(out) + return string(out) +} + +func trimSuffix(pattern, word string, greedy bool) (string, error) { + // regular expressions can't handle finding the shortest rightmost + // string so we reverse both search space and pattern to convert it + // to a leftmost search in both cases + pattern = reversePattern(pattern) + word = reverseString(word) + str, err := trimPrefix(pattern, word, greedy) + if err != nil { + return "", err + } + return reverseString(str), nil } diff --git a/vendor/github.com/moby/buildkit/util/stack/compress.go b/vendor/github.com/moby/buildkit/util/stack/compress.go new file mode 100644 index 000000000..6d9a78f34 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/stack/compress.go @@ -0,0 +1,67 @@ +package stack + +import ( + "slices" +) + +func compressStacks(st []*Stack) []*Stack { + if len(st) == 0 { + return nil + } + + slices.SortFunc(st, func(a, b *Stack) int { + return len(b.Frames) - len(a.Frames) + }) + + out := []*Stack{st[0]} + +loop0: + for _, st := range st[1:] { + maxIdx := -1 + for _, prev := range out { + idx := subFrames(st.Frames, prev.Frames) + if idx == -1 { + continue + } + // full match, potentially skip all + if idx == 
len(st.Frames)-1 { + if st.Pid == prev.Pid && st.Version == prev.Version && slices.Compare(st.Cmdline, prev.Cmdline) == 0 { + continue loop0 + } + } + if idx > maxIdx { + maxIdx = idx + } + } + + if maxIdx > 0 { + st.Frames = st.Frames[:len(st.Frames)-maxIdx] + } + out = append(out, st) + } + + return out +} + +func subFrames(a, b []*Frame) int { + idx := -1 + i := len(a) - 1 + j := len(b) - 1 + for i >= 0 { + if j < 0 { + break + } + if a[i].Equal(b[j]) { + idx++ + i-- + j-- + } else { + break + } + } + return idx +} + +func (a *Frame) Equal(b *Frame) bool { + return a.File == b.File && a.Line == b.Line && a.Name == b.Name +} diff --git a/vendor/github.com/moby/buildkit/util/stack/generate.go b/vendor/github.com/moby/buildkit/util/stack/generate.go deleted file mode 100644 index f3e967787..000000000 --- a/vendor/github.com/moby/buildkit/util/stack/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package stack - -//go:generate protoc -I=. -I=../../vendor/ --go_out=. --go_opt=paths=source_relative --go_opt=Mstack.proto=/util/stack stack.proto diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go index fb9fc3ddf..458a1a781 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -44,24 +44,29 @@ func Helper() { } func Traces(err error) []*Stack { + return compressStacks(traces(err)) +} + +func traces(err error) []*Stack { var st []*Stack - wrapped, ok := err.(interface { - Unwrap() error - }) - if ok { - st = Traces(wrapped.Unwrap()) + switch e := err.(type) { + case interface{ Unwrap() error }: + st = Traces(e.Unwrap()) + case interface{ Unwrap() []error }: + for _, ue := range e.Unwrap() { + st = Traces(ue) + // Only take first stack + if len(st) > 0 { + break + } + } } - if ste, ok := err.(interface { - StackTrace() errors.StackTrace - }); ok { + switch ste := err.(type) { + case interface{ StackTrace() errors.StackTrace }: st = append(st, convertStack(ste.StackTrace())) - } - - if ste, ok := err.(interface { - StackTrace() *Stack - }); ok { + case interface{ StackTrace() *Stack }: st = append(st, ste.StackTrace()) } diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go index 43809d487..87917a202 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT.
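The reworked Traces above now descends through both single-error (Unwrap() error) and multi-error (Unwrap() []error, e.g. errors.Join) wrappers, keeping only the first stack found per joined branch, and compressStacks then drops stacks whose frames are a suffix of another. A sketch of the call pattern (illustrative; it assumes github.com/pkg/errors as the source of stack-carrying errors):

package main

import (
	"errors"
	"fmt"

	"github.com/moby/buildkit/util/stack"
	pkgerrors "github.com/pkg/errors"
)

func main() {
	base := pkgerrors.New("boom") // exposes StackTrace() errors.StackTrace
	joined := errors.Join(base, errors.New("plain"))

	// Only the first stack inside the joined error is collected.
	for _, st := range stack.Traces(joined) {
		fmt.Println(len(st.Frames))
	}
}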
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.35.1 // protoc v3.11.4 -// source: stack.proto +// source: github.com/moby/buildkit/util/stack/stack.proto package stack @@ -34,11 +34,9 @@ type Stack struct { func (x *Stack) Reset() { *x = Stack{} - if protoimpl.UnsafeEnabled { - mi := &file_stack_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Stack) String() string { @@ -48,8 +46,8 @@ func (x *Stack) String() string { func (*Stack) ProtoMessage() {} func (x *Stack) ProtoReflect() protoreflect.Message { - mi := &file_stack_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes[0] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -61,7 +59,7 @@ func (x *Stack) ProtoReflect() protoreflect.Message { // Deprecated: Use Stack.ProtoReflect.Descriptor instead. func (*Stack) Descriptor() ([]byte, []int) { - return file_stack_proto_rawDescGZIP(), []int{0} + return file_github_com_moby_buildkit_util_stack_stack_proto_rawDescGZIP(), []int{0} } func (x *Stack) GetFrames() []*Frame { @@ -111,11 +109,9 @@ type Frame struct { func (x *Frame) Reset() { *x = Frame{} - if protoimpl.UnsafeEnabled { - mi := &file_stack_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Frame) String() string { @@ -125,8 +121,8 @@ func (x *Frame) String() string { func (*Frame) ProtoMessage() {} func (x *Frame) ProtoReflect() protoreflect.Message { - mi := &file_stack_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes[1] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -138,7 +134,7 @@ func (x *Frame) ProtoReflect() protoreflect.Message { // Deprecated: Use Frame.ProtoReflect.Descriptor instead. 
func (*Frame) Descriptor() ([]byte, []int) { - return file_stack_proto_rawDescGZIP(), []int{1} + return file_github_com_moby_buildkit_util_stack_stack_proto_rawDescGZIP(), []int{1} } func (x *Frame) GetName() string { @@ -162,45 +158,49 @@ func (x *Frame) GetLine() int32 { return 0 } -var File_stack_proto protoreflect.FileDescriptor - -var file_stack_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x73, - 0x74, 0x61, 0x63, 0x6b, 0x22, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x24, - 0x0a, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x06, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, - 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x05, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, +var File_github_com_moby_buildkit_util_stack_stack_proto protoreflect.FileDescriptor + +var file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, + 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, + 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x63, 0x6b, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, + 0x52, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6d, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x70, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x05, 0x46, 0x72, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 
0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4c, + 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x42, + 0x25, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, + 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, + 0x2f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_stack_proto_rawDescOnce sync.Once - file_stack_proto_rawDescData = file_stack_proto_rawDesc + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescOnce sync.Once + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc ) -func file_stack_proto_rawDescGZIP() []byte { - file_stack_proto_rawDescOnce.Do(func() { - file_stack_proto_rawDescData = protoimpl.X.CompressGZIP(file_stack_proto_rawDescData) +func file_github_com_moby_buildkit_util_stack_stack_proto_rawDescGZIP() []byte { + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescOnce.Do(func() { + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData) }) - return file_stack_proto_rawDescData + return file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData } -var file_stack_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_stack_proto_goTypes = []interface{}{ +var file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_moby_buildkit_util_stack_stack_proto_goTypes = []any{ (*Stack)(nil), // 0: stack.Stack (*Frame)(nil), // 1: stack.Frame } -var file_stack_proto_depIdxs = []int32{ +var file_github_com_moby_buildkit_util_stack_stack_proto_depIdxs = []int32{ 1, // 0: stack.Stack.frames:type_name -> stack.Frame 1, // [1:1] is the sub-list for method output_type 1, // [1:1] is the sub-list for method input_type @@ -209,53 +209,27 @@ var file_stack_proto_depIdxs = []int32{ 0, // [0:1] is the sub-list for field type_name } -func init() { file_stack_proto_init() } -func file_stack_proto_init() { - if File_stack_proto != nil { +func init() { file_github_com_moby_buildkit_util_stack_stack_proto_init() } +func file_github_com_moby_buildkit_util_stack_stack_proto_init() { + if File_github_com_moby_buildkit_util_stack_stack_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_stack_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Stack); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_stack_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Frame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_stack_proto_rawDesc, + RawDescriptor: file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_stack_proto_goTypes, - DependencyIndexes: file_stack_proto_depIdxs, - MessageInfos: file_stack_proto_msgTypes, + GoTypes: file_github_com_moby_buildkit_util_stack_stack_proto_goTypes, + DependencyIndexes: file_github_com_moby_buildkit_util_stack_stack_proto_depIdxs, + MessageInfos: 
file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes, }.Build() - File_stack_proto = out.File - file_stack_proto_rawDesc = nil - file_stack_proto_goTypes = nil - file_stack_proto_depIdxs = nil + File_github_com_moby_buildkit_util_stack_stack_proto = out.File + file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = nil + file_github_com_moby_buildkit_util_stack_stack_proto_goTypes = nil + file_github_com_moby_buildkit_util_stack_stack_proto_depIdxs = nil } diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.proto b/vendor/github.com/moby/buildkit/util/stack/stack.proto index 9c63bc362..aec8c557e 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.proto +++ b/vendor/github.com/moby/buildkit/util/stack/stack.proto @@ -2,16 +2,18 @@ syntax = "proto3"; package stack; +option go_package = "github.com/moby/buildkit/util/stack"; + message Stack { - repeated Frame frames = 1; - repeated string cmdline = 2; - int32 pid = 3; - string version = 4; - string revision = 5; + repeated Frame frames = 1; + repeated string cmdline = 2; + int32 pid = 3; + string version = 4; + string revision = 5; } message Frame { - string Name = 1; - string File = 2; - int32 Line = 3; -} \ No newline at end of file + string Name = 1; + string File = 2; + int32 Line = 3; +} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack_vtproto.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack_vtproto.pb.go new file mode 100644 index 000000000..270eccc03 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/stack/stack_vtproto.pb.go @@ -0,0 +1,660 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10 +// source: github.com/moby/buildkit/util/stack/stack.proto + +package stack + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Stack) CloneVT() *Stack { + if m == nil { + return (*Stack)(nil) + } + r := new(Stack) + r.Pid = m.Pid + r.Version = m.Version + r.Revision = m.Revision + if rhs := m.Frames; rhs != nil { + tmpContainer := make([]*Frame, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Frames = tmpContainer + } + if rhs := m.Cmdline; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cmdline = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Stack) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Frame) CloneVT() *Frame { + if m == nil { + return (*Frame)(nil) + } + r := new(Frame) + r.Name = m.Name + r.File = m.File + r.Line = m.Line + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Frame) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (this *Stack) EqualVT(that *Stack) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Frames) != len(that.Frames) { + return false + } + for i, vx := range this.Frames { + vy := that.Frames[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &Frame{} + } + if q == nil { + q = &Frame{} + } + if !p.EqualVT(q) { + return false + } + } + } + if len(this.Cmdline) != len(that.Cmdline) { + return false + } + for i, vx := range this.Cmdline { + vy := that.Cmdline[i] + if vx != vy { + return false + } + } + if this.Pid != that.Pid { + return false + } + if this.Version != that.Version { + return false + } + if this.Revision != that.Revision { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Stack) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Stack) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *Frame) EqualVT(that *Frame) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name != that.Name { + return false + } + if this.File != that.File { + return false + } + if this.Line != that.Line { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Frame) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Frame) + if !ok { + return false + } + return this.EqualVT(that) +} +func (m *Stack) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Stack) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Stack) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Revision) > 0 { + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x2a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } + if m.Pid != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Pid)) + i-- + dAtA[i] = 0x18 + } + if len(m.Cmdline) > 0 { + for iNdEx := len(m.Cmdline) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cmdline[iNdEx]) + copy(dAtA[i:], m.Cmdline[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cmdline[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Frames) > 0 { + for iNdEx := len(m.Frames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Frames[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Frame) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Frame) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Frame) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Line != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Line)) + i-- + dAtA[i] = 0x18 + } + if len(m.File) > 0 { + i -= len(m.File) + copy(dAtA[i:], m.File) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.File))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Stack) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Frames) > 0 { + for _, e := range m.Frames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Cmdline) > 0 { + for _, s := range m.Cmdline { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Pid != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Pid)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Frame) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.File) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Line != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Line)) + } + n += len(m.unknownFields) + return n +} + +func (m *Stack) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Stack: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Stack: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Frames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frames = append(m.Frames, &Frame{}) + if err := m.Frames[len(m.Frames)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cmdline", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cmdline = append(m.Cmdline, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Frame) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Frame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Frame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.File = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + m.Line = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Line |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/moby/sys/capability/.codespellrc b/vendor/github.com/moby/sys/capability/.codespellrc new file mode 100644 index 000000000..e874be563 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/.codespellrc @@ -0,0 +1,3 @@ +[codespell] +skip = ./.git +ignore-words-list = nd diff --git a/vendor/github.com/moby/sys/capability/.golangci.yml b/vendor/github.com/moby/sys/capability/.golangci.yml new file mode 100644 index 000000000..d775aadd6 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/.golangci.yml @@ -0,0 +1,6 @@ +linters: + enable: + - unconvert + - unparam + - gofumpt + - errorlint diff --git a/vendor/github.com/moby/sys/capability/CHANGELOG.md b/vendor/github.com/moby/sys/capability/CHANGELOG.md new file mode 100644 index 000000000..037ef010a --- /dev/null +++ b/vendor/github.com/moby/sys/capability/CHANGELOG.md @@ -0,0 +1,90 @@ +# Changelog +This file documents all notable changes made to this project since the initial fork +from https://github.com/syndtr/gocapability/commit/42c35b4376354fd5. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.3.0] - 2024-09-25 + +### Added +* Added [ListKnown] and [ListSupported] functions. (#153) +* [LastCap] is now available on non-Linux platforms (where it returns an error). (#152) + +### Changed +* [List] is now deprecated in favor of [ListKnown] and [ListSupported]. (#153) + +### Fixed +* Various documentation improvements. (#151) +* Fix "generated code" comment. (#153) + +## [0.2.0] - 2024-09-16 + +This is the first release after the move to a new home in +github.com/moby/sys/capability. + +### Fixed + * Fixed URLs in documentation to reflect the new home. + +## [0.1.1] - 2024-08-01 + +This is a maintenance release, fixing a few minor issues. + +### Fixed + * Fixed future kernel compatibility, for real this time. [#11] + * Fixed [LastCap] to be a function. [#12] + +## [0.1.0] - 2024-07-31 + +This is an initial release since the fork. + +### Breaking changes + + * The `CAP_LAST_CAP` variable is removed; users need to modify the code to + use [LastCap] to get the value. [#6] + * The code now requires Go >= 1.21. + +### Added + * `go.mod` and `go.sum` files. [#2] + * New [LastCap] function. [#6] + * Basic CI using GHA infra. [#8], [#9] + * README and CHANGELOG. [#10] + +### Fixed + * Fixed ambient capabilities error handling in [Apply]. [#3] + * Fixed future kernel compatibility. [#1] + * Fixed various linter warnings. [#4], [#7] + +### Changed + * Go build tags changed from old-style (`+build`) to new Go 1.17+ style (`go:build`). [#2] + +### Removed + * Removed support for capabilities v1 and v2. [#1] + * Removed init function so programs that use this package start faster. [#6] + * Removed `CAP_LAST_CAP` (use [LastCap] instead). 
[#6] + + +[Apply]: https://pkg.go.dev/github.com/moby/sys/capability#Capabilities.Apply +[LastCap]: https://pkg.go.dev/github.com/moby/sys/capability#LastCap +[List]: https://pkg.go.dev/github.com/moby/sys/capability#List +[ListKnown]: https://pkg.go.dev/github.com/moby/sys/capability#ListKnown +[ListSupported]: https://pkg.go.dev/github.com/moby/sys/capability#ListSupported + + +[0.3.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.3.0 +[0.2.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.2.0 +[0.1.1]: https://github.com/kolyshkin/capability/compare/v0.1.0...v0.1.1 +[0.1.0]: https://github.com/kolyshkin/capability/compare/42c35b4376354fd5...v0.1.0 + + +[#1]: https://github.com/kolyshkin/capability/pull/1 +[#2]: https://github.com/kolyshkin/capability/pull/2 +[#3]: https://github.com/kolyshkin/capability/pull/3 +[#4]: https://github.com/kolyshkin/capability/pull/4 +[#6]: https://github.com/kolyshkin/capability/pull/6 +[#7]: https://github.com/kolyshkin/capability/pull/7 +[#8]: https://github.com/kolyshkin/capability/pull/8 +[#9]: https://github.com/kolyshkin/capability/pull/9 +[#10]: https://github.com/kolyshkin/capability/pull/10 +[#11]: https://github.com/kolyshkin/capability/pull/11 +[#12]: https://github.com/kolyshkin/capability/pull/12 diff --git a/vendor/github.com/moby/sys/capability/LICENSE b/vendor/github.com/moby/sys/capability/LICENSE new file mode 100644 index 000000000..08adcd6ec --- /dev/null +++ b/vendor/github.com/moby/sys/capability/LICENSE @@ -0,0 +1,25 @@ +Copyright 2023 The Capability Authors. +Copyright 2013 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/moby/sys/capability/README.md b/vendor/github.com/moby/sys/capability/README.md new file mode 100644 index 000000000..84b74871a --- /dev/null +++ b/vendor/github.com/moby/sys/capability/README.md @@ -0,0 +1,13 @@ +This is a fork of (apparently no longer maintained) +https://github.com/syndtr/gocapability package. It provides basic primitives to +work with [Linux capabilities][capabilities(7)]. + +For changes, see [CHANGELOG.md](./CHANGELOG.md). 
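For orientation, a minimal sketch of the NewPid2/Load flow that this package's deprecation notices recommend (an illustrative example, not part of the vendored files; it runs on Linux and inspects the current process):

package main

import (
	"fmt"

	"github.com/moby/sys/capability"
)

func main() {
	// NewPid2 only allocates the object; state is loaded explicitly.
	c, err := capability.NewPid2(0) // pid 0 means the current process
	if err != nil {
		panic(err)
	}
	if err := c.Load(); err != nil {
		panic(err)
	}
	fmt.Println(c.Get(capability.EFFECTIVE, capability.CAP_NET_BIND_SERVICE))
}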
+ +[![Go Reference](https://pkg.go.dev/badge/github.com/moby/sys/capability/capability.svg)](https://pkg.go.dev/github.com/moby/sys/capability) + +## Alternatives + + * https://pkg.go.dev/kernel.org/pub/linux/libs/security/libcap/cap + +[capabilities(7)]: https://man7.org/linux/man-pages/man7/capabilities.7.html diff --git a/vendor/github.com/moby/sys/capability/capability.go b/vendor/github.com/moby/sys/capability/capability.go new file mode 100644 index 000000000..1b36f5f22 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/capability.go @@ -0,0 +1,144 @@ +// Copyright 2023 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package capability provides utilities for manipulating POSIX capabilities. +package capability + +type Capabilities interface { + // Get check whether a capability present in the given + // capabilities set. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Get(which CapType, what Cap) bool + + // Empty check whether all capability bits of the given capabilities + // set are zero. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Empty(which CapType) bool + + // Full check whether all capability bits of the given capabilities + // set are one. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Full(which CapType) bool + + // Set sets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Set(which CapType, caps ...Cap) + + // Unset unsets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Unset(which CapType, caps ...Cap) + + // Fill sets all bits of the given capabilities kind to one. The + // 'kind' value should be one or combination (OR'ed) of CAPS, + // BOUNDS or AMBS. + Fill(kind CapType) + + // Clear sets all bits of the given capabilities kind to zero. The + // 'kind' value should be one or combination (OR'ed) of CAPS, + // BOUNDS or AMBS. + Clear(kind CapType) + + // String return current capabilities state of the given capabilities + // set as string. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE BOUNDING or AMBIENT + StringCap(which CapType) string + + // String return current capabilities state as string. + String() string + + // Load load actual capabilities value. This will overwrite all + // outstanding changes. + Load() error + + // Apply apply the capabilities settings, so all changes will take + // effect. + Apply(kind CapType) error +} + +// NewPid initializes a new [Capabilities] object for given pid when +// it is nonzero, or for the current process if pid is 0. +// +// Deprecated: Replace with [NewPid2] followed by [Capabilities.Load]. 
+// For example, replace: +// +// c, err := NewPid(0) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewPid2(0) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } +func NewPid(pid int) (Capabilities, error) { + c, err := newPid(pid) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewPid2 initializes a new [Capabilities] object for given pid when +// it is nonzero, or for the current process if pid is 0. This +// does not load the process's current capabilities; to do that you +// must call [Capabilities.Load] explicitly. +func NewPid2(pid int) (Capabilities, error) { + return newPid(pid) +} + +// NewFile initializes a new Capabilities object for given file path. +// +// Deprecated: Replace with [NewFile2] followed by [Capabilities.Load]. +// For example, replace: +// +// c, err := NewFile(path) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewFile2(path) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } +func NewFile(path string) (Capabilities, error) { + c, err := newFile(path) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewFile2 creates a new initialized [Capabilities] object for given +// file path. This does not load the process's current capabilities; +// to do that you must call [Capabilities.Load] explicitly. +func NewFile2(path string) (Capabilities, error) { + return newFile(path) +} + +// LastCap returns highest valid capability of the running kernel, +// or an error if it can not be obtained. +// +// See also: [ListSupported]. +func LastCap() (Cap, error) { + return lastCap() +} diff --git a/vendor/github.com/moby/sys/capability/capability_linux.go b/vendor/github.com/moby/sys/capability/capability_linux.go new file mode 100644 index 000000000..aa600e1d9 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/capability_linux.go @@ -0,0 +1,541 @@ +// Copyright 2023 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package capability + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + "syscall" +) + +const ( + linuxCapVer1 = 0x19980330 // No longer supported. + linuxCapVer2 = 0x20071026 // No longer supported. 
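A short sketch of LastCap in use (illustrative; it assumes Cap's String method from the package's generated enum code and a Linux host):

package main

import (
	"fmt"

	"github.com/moby/sys/capability"
)

func main() {
	last, err := capability.LastCap()
	if err != nil {
		panic(err) // non-Linux platforms return an error here
	}
	// Enumerate every capability index the running kernel supports.
	for c := capability.Cap(0); c <= last; c++ {
		fmt.Printf("%d: %s\n", int(c), c)
	}
}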
+ linuxCapVer3 = 0x20080522 +) + +var lastCap = sync.OnceValues(func() (Cap, error) { + f, err := os.Open("/proc/sys/kernel/cap_last_cap") + if err != nil { + return 0, err + } + + buf := make([]byte, 11) + l, err := f.Read(buf) + f.Close() + if err != nil { + return 0, err + } + buf = buf[:l] + + last, err := strconv.Atoi(strings.TrimSpace(string(buf))) + if err != nil { + return 0, err + } + return Cap(last), nil +}) + +func capUpperMask() uint32 { + last, err := lastCap() + if err != nil || last < 32 { + return 0 + } + return (uint32(1) << (uint(last) - 31)) - 1 +} + +func mkStringCap(c Capabilities, which CapType) (ret string) { + last, err := lastCap() + if err != nil { + return "" + } + for i, first := Cap(0), true; i <= last; i++ { + if !c.Get(which, i) { + continue + } + if first { + first = false + } else { + ret += ", " + } + ret += i.String() + } + return +} + +func mkString(c Capabilities, max CapType) (ret string) { + ret = "{" + for i := CapType(1); i <= max; i <<= 1 { + ret += " " + i.String() + "=\"" + if c.Empty(i) { + ret += "empty" + } else if c.Full(i) { + ret += "full" + } else { + ret += c.StringCap(i) + } + ret += "\"" + } + ret += " }" + return +} + +var capVersion = sync.OnceValues(func() (uint32, error) { + var hdr capHeader + err := capget(&hdr, nil) + return hdr.version, err +}) + +func newPid(pid int) (c Capabilities, retErr error) { + ver, err := capVersion() + if err != nil { + retErr = fmt.Errorf("unable to get capability version from the kernel: %w", err) + return + } + switch ver { + case linuxCapVer1, linuxCapVer2: + retErr = errors.New("old/unsupported capability version (kernel older than 2.6.26?)") + default: + // Either linuxCapVer3, or an unknown/future version (such as v4). + // In the latter case, we fall back to v3 as the latest version known + // to this package, as kernel should be backward-compatible to v3. 
+ p := new(capsV3) + p.hdr.version = linuxCapVer3 + p.hdr.pid = int32(pid) + c = p + } + return +} + +type capsV3 struct { + hdr capHeader + data [2]capData + bounds [2]uint32 + ambient [2]uint32 +} + +func (c *capsV3) Get(which CapType, what Cap) bool { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: + return (1<<uint(what))&c.data[i].effective != 0 + case PERMITTED: + return (1<<uint(what))&c.data[i].permitted != 0 + case INHERITABLE: + return (1<<uint(what))&c.data[i].inheritable != 0 + case BOUNDING: + return (1<<uint(what))&c.bounds[i] != 0 + case AMBIENT: + return (1<<uint(what))&c.ambient[i] != 0 + } + + return false +} + +func (c *capsV3) getData(which CapType, dest []uint32) { + switch which { + case EFFECTIVE: + dest[0] = c.data[0].effective + dest[1] = c.data[1].effective + case PERMITTED: + dest[0] = c.data[0].permitted + dest[1] = c.data[1].permitted + case INHERITABLE: + dest[0] = c.data[0].inheritable + dest[1] = c.data[1].inheritable + case BOUNDING: + dest[0] = c.bounds[0] + dest[1] = c.bounds[1] + case AMBIENT: + dest[0] = c.ambient[0] + dest[1] = c.ambient[1] + } +} + +func (c *capsV3) Empty(which CapType) bool { + var data [2]uint32 + c.getData(which, data[:]) + return data[0] == 0 && data[1] == 0 +} + +func (c *capsV3) Full(which CapType) bool { + var data [2]uint32 + c.getData(which, data[:]) + if (data[0] & 0xffffffff) != 0xffffffff { + return false + } + mask := capUpperMask() + return (data[1] & mask) == mask +} + +func (c *capsV3) Set(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable |= 1 << uint(what) + } + if which&BOUNDING != 0 { + c.bounds[i] |= 1 << uint(what) + } + if which&AMBIENT != 0 { + c.ambient[i] |= 1 << uint(what) + } + } +} + +func (c *capsV3) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable &= ^(1 << uint(what)) + } + if which&BOUNDING != 0 { + c.bounds[i] &= ^(1 << uint(what)) + } + if which&AMBIENT != 0 { + c.ambient[i] &= ^(1 << uint(what)) + } + } +} + +func (c *capsV3) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0xffffffff + c.data[0].permitted = 0xffffffff + c.data[0].inheritable = 0 + c.data[1].effective = 0xffffffff + c.data[1].permitted = 0xffffffff + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0xffffffff + c.bounds[1] = 0xffffffff + } + if kind&AMBS == AMBS { + c.ambient[0] = 0xffffffff + c.ambient[1] = 0xffffffff + } +} + +func (c *capsV3) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0 + c.data[0].permitted = 0 + c.data[0].inheritable = 0 + c.data[1].effective = 0 + c.data[1].permitted = 0 + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0 + c.bounds[1] = 0 + } + if kind&AMBS == AMBS { + c.ambient[0] = 0 + c.ambient[1] = 0 + } +} + +func (c *capsV3) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsV3) String() (ret string) { + return mkString(c, BOUNDING) +} + +func (c *capsV3) Load() (err error) { + err = capget(&c.hdr, &c.data[0]) + if err != nil { + return + } + + path := "/proc/self/status" + if c.hdr.pid != 0 { + path = fmt.Sprintf("/proc/%d/status", c.hdr.pid) + } + + f, err := os.Open(path) + if err != nil { + return + } + b := bufio.NewReader(f) + for { + line, e := b.ReadString('\n') + if e != nil { + if e != io.EOF { + err = e + } + break + } + if strings.HasPrefix(line, "CapB") { + _, err = fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0]) + if err != nil { + break + } + continue + } + if strings.HasPrefix(line, "CapA") { + _, err = fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0]) + if err != nil { + break + } + continue + } + } + f.Close() + + return +} + +func (c *capsV3) Apply(kind CapType) (err error) { + last, err := LastCap() + if err != nil { + return err + } + if kind&BOUNDS == BOUNDS { + var data [2]capData + err = capget(&c.hdr, &data[0]) + if err != nil { + return + } + if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 { + for i := Cap(0); i <= last; i++ { + if c.Get(BOUNDING, i) { + continue + } + err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0) + if err != nil { + // Ignore EINVAL since the capability may not be supported in this system. + if err == syscall.EINVAL { //nolint:errorlint // Errors from syscall are bare. + err = nil + continue + } + return + } + } + } + } + + if kind&CAPS == CAPS { + err = capset(&c.hdr, &c.data[0]) + if err != nil { + return + } + } + + if kind&AMBS == AMBS { + for i := Cap(0); i <= last; i++ { + action := pr_CAP_AMBIENT_LOWER + if c.Get(AMBIENT, i) { + action = pr_CAP_AMBIENT_RAISE + } + err = prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0) + // Ignore EINVAL as not supported on kernels before 4.3 + if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL { //nolint:errorlint // Errors from syscall are bare. + err = nil + continue + } + } + } + + return +} + +func newFile(path string) (c Capabilities, err error) { + c = &capsFile{path: path} + return +} + +type capsFile struct { + path string + data vfscapData +} + +func (c *capsFile) Get(which CapType, what Cap) bool { + var i uint + if what > 31 { + if c.data.version == 1 { + return false + } + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: + return (1<<uint(what))&c.data.effective[i] != 0 + case PERMITTED: + return (1<<uint(what))&c.data.data[i].permitted != 0 + case INHERITABLE: + return (1<<uint(what))&c.data.data[i].inheritable != 0 + } + + return false +} + +func (c *capsFile) Set(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + 
c.data.effective[i] |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable |= 1 << uint(what) + } + } +} + +func (c *capsFile) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data.effective[i] &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable &= ^(1 << uint(what)) + } + } +} + +func (c *capsFile) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0xffffffff + c.data.data[0].permitted = 0xffffffff + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0xffffffff + c.data.data[1].permitted = 0xffffffff + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0 + c.data.data[0].permitted = 0 + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0 + c.data.data[1].permitted = 0 + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsFile) String() (ret string) { + return mkString(c, INHERITABLE) +} + +func (c *capsFile) Load() (err error) { + return getVfsCap(c.path, &c.data) +} + +func (c *capsFile) Apply(kind CapType) (err error) { + if kind&CAPS == CAPS { + return setVfsCap(c.path, &c.data) + } + return +} diff --git a/vendor/github.com/moby/sys/capability/capability_noop.go b/vendor/github.com/moby/sys/capability/capability_noop.go new file mode 100644 index 000000000..ba819ff05 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/capability_noop.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux + +package capability + +import "errors" + +var errNotSup = errors.New("not supported") + +func newPid(_ int) (Capabilities, error) { + return nil, errNotSup +} + +func newFile(_ string) (Capabilities, error) { + return nil, errNotSup +} + +func lastCap() (Cap, error) { + return -1, errNotSup +} diff --git a/vendor/github.com/moby/sys/capability/enum.go b/vendor/github.com/moby/sys/capability/enum.go new file mode 100644 index 000000000..f89f0273a --- /dev/null +++ b/vendor/github.com/moby/sys/capability/enum.go @@ -0,0 +1,330 @@ +// Copyright 2024 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package capability + +import "slices" + +type CapType uint + +func (c CapType) String() string { + switch c { + case EFFECTIVE: + return "effective" + case PERMITTED: + return "permitted" + case INHERITABLE: + return "inheritable" + case BOUNDING: + return "bounding" + case CAPS: + return "caps" + case AMBIENT: + return "ambient" + } + return "unknown" +} + +const ( + EFFECTIVE CapType = 1 << iota + PERMITTED + INHERITABLE + BOUNDING + AMBIENT + + CAPS = EFFECTIVE | PERMITTED | INHERITABLE + BOUNDS = BOUNDING + AMBS = AMBIENT +) + +//go:generate go run enumgen/gen.go +type Cap int + +// POSIX-draft defined capabilities and Linux extensions. +// +// Defined in https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h +const ( + // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this + // overrides the restriction of changing file ownership and group + // ownership. + CAP_CHOWN = Cap(0) + + // Override all DAC access, including ACL execute access if + // [_POSIX_ACL] is defined. Excluding DAC access covered by + // CAP_LINUX_IMMUTABLE. + CAP_DAC_OVERRIDE = Cap(1) + + // Overrides all DAC restrictions regarding read and search on files + // and directories, including ACL restrictions if [_POSIX_ACL] is + // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. + CAP_DAC_READ_SEARCH = Cap(2) + + // Overrides all restrictions about allowed operations on files, where + // file owner ID must be equal to the user ID, except where CAP_FSETID + // is applicable. It doesn't override MAC and DAC restrictions. + CAP_FOWNER = Cap(3) + + // Overrides the following restrictions that the effective user ID + // shall match the file owner ID when setting the S_ISUID and S_ISGID + // bits on that file; that the effective group ID (or one of the + // supplementary group IDs) shall match the file owner ID when setting + // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are + // cleared on successful return from chown(2) (not implemented). + CAP_FSETID = Cap(4) + + // Overrides the restriction that the real or effective user ID of a + // process sending a signal must match the real or effective user ID + // of the process receiving the signal. + CAP_KILL = Cap(5) + + // Allows setgid(2) manipulation + // Allows setgroups(2) + // Allows forged gids on socket credentials passing. + CAP_SETGID = Cap(6) + + // Allows set*uid(2) manipulation (including fsuid). + // Allows forged pids on socket credentials passing. 
+ CAP_SETUID = Cap(7) + + // Linux-specific capabilities + + // Without VFS support for capabilities: + // Transfer any capability in your permitted set to any pid, + // remove any capability in your permitted set from any pid + // With VFS support for capabilities (neither of above, but) + // Add any capability from current's capability bounding set + // to the current process' inheritable set + // Allow taking bits out of capability bounding set + // Allow modification of the securebits for a process + CAP_SETPCAP = Cap(8) + + // Allow modification of S_IMMUTABLE and S_APPEND file attributes + CAP_LINUX_IMMUTABLE = Cap(9) + + // Allows binding to TCP/UDP sockets below 1024 + // Allows binding to ATM VCIs below 32 + CAP_NET_BIND_SERVICE = Cap(10) + + // Allow broadcasting, listen to multicast + CAP_NET_BROADCAST = Cap(11) + + // Allow interface configuration + // Allow administration of IP firewall, masquerading and accounting + // Allow setting debug option on sockets + // Allow modification of routing tables + // Allow setting arbitrary process / process group ownership on + // sockets + // Allow binding to any address for transparent proxying (also via NET_RAW) + // Allow setting TOS (type of service) + // Allow setting promiscuous mode + // Allow clearing driver statistics + // Allow multicasting + // Allow read/write of device-specific registers + // Allow activation of ATM control sockets + CAP_NET_ADMIN = Cap(12) + + // Allow use of RAW sockets + // Allow use of PACKET sockets + // Allow binding to any address for transparent proxying (also via NET_ADMIN) + CAP_NET_RAW = Cap(13) + + // Allow locking of shared memory segments + // Allow mlock and mlockall (which doesn't really have anything to do + // with IPC) + CAP_IPC_LOCK = Cap(14) + + // Override IPC ownership checks + CAP_IPC_OWNER = Cap(15) + + // Insert and remove kernel modules - modify kernel without limit + CAP_SYS_MODULE = Cap(16) + + // Allow ioperm/iopl access + // Allow sending USB messages to any device via /proc/bus/usb + CAP_SYS_RAWIO = Cap(17) + + // Allow use of chroot() + CAP_SYS_CHROOT = Cap(18) + + // Allow ptrace() of any process + CAP_SYS_PTRACE = Cap(19) + + // Allow configuration of process accounting + CAP_SYS_PACCT = Cap(20) + + // Allow configuration of the secure attention key + // Allow administration of the random device + // Allow examination and configuration of disk quotas + // Allow setting the domainname + // Allow setting the hostname + // Allow calling bdflush() + // Allow mount() and umount(), setting up new smb connection + // Allow some autofs root ioctls + // Allow nfsservctl + // Allow VM86_REQUEST_IRQ + // Allow to read/write pci config on alpha + // Allow irix_prctl on mips (setstacksize) + // Allow flushing all cache on m68k (sys_cacheflush) + // Allow removing semaphores + // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores + // and shared memory + // Allow locking/unlocking of shared memory segment + // Allow turning swap on/off + // Allow forged pids on socket credentials passing + // Allow setting readahead and flushing buffers on block devices + // Allow setting geometry in floppy driver + // Allow turning DMA on/off in xd driver + // Allow administration of md devices (mostly the above, but some + // extra ioctls) + // Allow tuning the ide driver + // Allow access to the nvram device + // Allow administration of apm_bios, serial and bttv (TV) device + // Allow manufacturer commands in isdn CAPI support driver + // Allow reading non-standardized portions of pci 
configuration space + // Allow DDI debug ioctl on sbpcd driver + // Allow setting up serial ports + // Allow sending raw qic-117 commands + // Allow enabling/disabling tagged queuing on SCSI controllers and sending + // arbitrary SCSI commands + // Allow setting encryption key on loopback filesystem + // Allow setting zone reclaim policy + // Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility + CAP_SYS_ADMIN = Cap(21) + + // Allow use of reboot() + CAP_SYS_BOOT = Cap(22) + + // Allow raising priority and setting priority on other (different + // UID) processes + // Allow use of FIFO and round-robin (realtime) scheduling on own + // processes and setting the scheduling algorithm used by another + // process. + // Allow setting cpu affinity on other processes + CAP_SYS_NICE = Cap(23) + + // Override resource limits. Set resource limits. + // Override quota limits. + // Override reserved space on ext2 filesystem + // Modify data journaling mode on ext3 filesystem (uses journaling + // resources) + // NOTE: ext2 honors fsuid when checking for resource overrides, so + // you can override using fsuid too + // Override size restrictions on IPC message queues + // Allow more than 64hz interrupts from the real-time clock + // Override max number of consoles on console allocation + // Override max number of keymaps + // Control memory reclaim behavior + CAP_SYS_RESOURCE = Cap(24) + + // Allow manipulation of system clock + // Allow irix_stime on mips + // Allow setting the real-time clock + CAP_SYS_TIME = Cap(25) + + // Allow configuration of tty devices + // Allow vhangup() of tty + CAP_SYS_TTY_CONFIG = Cap(26) + + // Allow the privileged aspects of mknod() + CAP_MKNOD = Cap(27) + + // Allow taking of leases on files + CAP_LEASE = Cap(28) + + CAP_AUDIT_WRITE = Cap(29) + CAP_AUDIT_CONTROL = Cap(30) + CAP_SETFCAP = Cap(31) + + // Override MAC access. + // The base kernel enforces no MAC policy. + // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based overrides of that policy, this is + // the capability it should use to do so. + CAP_MAC_OVERRIDE = Cap(32) + + // Allow MAC configuration or state changes. + // The base kernel requires no MAC configuration. + // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based checks on modifications to that + // policy or the data required to maintain it, this is the + // capability it should use to do so. 
+ CAP_MAC_ADMIN = Cap(33) + + // Allow configuring the kernel's syslog (printk behaviour) + CAP_SYSLOG = Cap(34) + + // Allow triggering something that will wake the system + CAP_WAKE_ALARM = Cap(35) + + // Allow preventing system suspends + CAP_BLOCK_SUSPEND = Cap(36) + + // Allow reading the audit log via multicast netlink socket + CAP_AUDIT_READ = Cap(37) + + // Allow system performance and observability privileged operations + // using perf_events, i915_perf and other kernel subsystems + CAP_PERFMON = Cap(38) + + // CAP_BPF allows the following BPF operations: + // - Creating all types of BPF maps + // - Advanced verifier features + // - Indirect variable access + // - Bounded loops + // - BPF to BPF function calls + // - Scalar precision tracking + // - Larger complexity limits + // - Dead code elimination + // - And potentially other features + // - Loading BPF Type Format (BTF) data + // - Retrieve xlated and JITed code of BPF programs + // - Use bpf_spin_lock() helper + // + // CAP_PERFMON relaxes the verifier checks further: + // - BPF progs can use of pointer-to-integer conversions + // - speculation attack hardening measures are bypassed + // - bpf_probe_read to read arbitrary kernel memory is allowed + // - bpf_trace_printk to print kernel memory is allowed + // + // CAP_SYS_ADMIN is required to use bpf_probe_write_user. + // + // CAP_SYS_ADMIN is required to iterate system wide loaded + // programs, maps, links, BTFs and convert their IDs to file descriptors. + // + // CAP_PERFMON and CAP_BPF are required to load tracing programs. + // CAP_NET_ADMIN and CAP_BPF are required to load networking programs. + CAP_BPF = Cap(39) + + // Allow checkpoint/restore related operations. + // Introduced in kernel 5.9 + CAP_CHECKPOINT_RESTORE = Cap(40) +) + +// List returns the list of all capabilities known to the package. +// +// Deprecated: use [ListKnown] or [ListSupported] instead. +func List() []Cap { + return ListKnown() +} + +// ListKnown returns the list of all capabilities known to the package. +func ListKnown() []Cap { + return list() +} + +// ListSupported retuns the list of all capabilities known to the package, +// except those that are not supported by the currently running Linux kernel. +func ListSupported() ([]Cap, error) { + last, err := LastCap() + if err != nil { + return nil, err + } + return slices.DeleteFunc(list(), func(c Cap) bool { + // Remove caps not supported by the kernel. + return c > last + }), nil +} diff --git a/vendor/github.com/moby/sys/capability/enum_gen.go b/vendor/github.com/moby/sys/capability/enum_gen.go new file mode 100644 index 000000000..f72cd43a6 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/enum_gen.go @@ -0,0 +1,137 @@ +// Code generated by go generate; DO NOT EDIT. 
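The capability surface vendored above is small: Cap constants, CapType set selectors, and the ListKnown/ListSupported/LastCap helpers. A hedged usage sketch follows, assuming the package's documented NewPid2 constructor from capability.go, which is not shown in this diff; error handling is condensed:

    package main

    import (
    	"fmt"

    	"github.com/moby/sys/capability"
    )

    func main() {
    	// NewPid2 only allocates; Load fills the sets via capget(2)
    	// and /proc/<pid>/status. Pid 0 means the current process.
    	caps, err := capability.NewPid2(0)
    	if err != nil {
    		panic(err)
    	}
    	if err := caps.Load(); err != nil {
    		panic(err)
    	}
    	fmt.Println("CAP_NET_ADMIN effective:",
    		caps.Get(capability.EFFECTIVE, capability.CAP_NET_ADMIN))

    	// ListSupported trims ListKnown to what the running kernel
    	// reports via LastCap, so newer caps drop out on older kernels.
    	supported, err := capability.ListSupported()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("kernel supports %d of %d known capabilities\n",
    		len(supported), len(capability.ListKnown()))
    }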
+ +package capability + +func (c Cap) String() string { + switch c { + case CAP_CHOWN: + return "chown" + case CAP_DAC_OVERRIDE: + return "dac_override" + case CAP_DAC_READ_SEARCH: + return "dac_read_search" + case CAP_FOWNER: + return "fowner" + case CAP_FSETID: + return "fsetid" + case CAP_KILL: + return "kill" + case CAP_SETGID: + return "setgid" + case CAP_SETUID: + return "setuid" + case CAP_SETPCAP: + return "setpcap" + case CAP_LINUX_IMMUTABLE: + return "linux_immutable" + case CAP_NET_BIND_SERVICE: + return "net_bind_service" + case CAP_NET_BROADCAST: + return "net_broadcast" + case CAP_NET_ADMIN: + return "net_admin" + case CAP_NET_RAW: + return "net_raw" + case CAP_IPC_LOCK: + return "ipc_lock" + case CAP_IPC_OWNER: + return "ipc_owner" + case CAP_SYS_MODULE: + return "sys_module" + case CAP_SYS_RAWIO: + return "sys_rawio" + case CAP_SYS_CHROOT: + return "sys_chroot" + case CAP_SYS_PTRACE: + return "sys_ptrace" + case CAP_SYS_PACCT: + return "sys_pacct" + case CAP_SYS_ADMIN: + return "sys_admin" + case CAP_SYS_BOOT: + return "sys_boot" + case CAP_SYS_NICE: + return "sys_nice" + case CAP_SYS_RESOURCE: + return "sys_resource" + case CAP_SYS_TIME: + return "sys_time" + case CAP_SYS_TTY_CONFIG: + return "sys_tty_config" + case CAP_MKNOD: + return "mknod" + case CAP_LEASE: + return "lease" + case CAP_AUDIT_WRITE: + return "audit_write" + case CAP_AUDIT_CONTROL: + return "audit_control" + case CAP_SETFCAP: + return "setfcap" + case CAP_MAC_OVERRIDE: + return "mac_override" + case CAP_MAC_ADMIN: + return "mac_admin" + case CAP_SYSLOG: + return "syslog" + case CAP_WAKE_ALARM: + return "wake_alarm" + case CAP_BLOCK_SUSPEND: + return "block_suspend" + case CAP_AUDIT_READ: + return "audit_read" + case CAP_PERFMON: + return "perfmon" + case CAP_BPF: + return "bpf" + case CAP_CHECKPOINT_RESTORE: + return "checkpoint_restore" + } + return "unknown" +} + +func list() []Cap { + return []Cap{ + CAP_CHOWN, + CAP_DAC_OVERRIDE, + CAP_DAC_READ_SEARCH, + CAP_FOWNER, + CAP_FSETID, + CAP_KILL, + CAP_SETGID, + CAP_SETUID, + CAP_SETPCAP, + CAP_LINUX_IMMUTABLE, + CAP_NET_BIND_SERVICE, + CAP_NET_BROADCAST, + CAP_NET_ADMIN, + CAP_NET_RAW, + CAP_IPC_LOCK, + CAP_IPC_OWNER, + CAP_SYS_MODULE, + CAP_SYS_RAWIO, + CAP_SYS_CHROOT, + CAP_SYS_PTRACE, + CAP_SYS_PACCT, + CAP_SYS_ADMIN, + CAP_SYS_BOOT, + CAP_SYS_NICE, + CAP_SYS_RESOURCE, + CAP_SYS_TIME, + CAP_SYS_TTY_CONFIG, + CAP_MKNOD, + CAP_LEASE, + CAP_AUDIT_WRITE, + CAP_AUDIT_CONTROL, + CAP_SETFCAP, + CAP_MAC_OVERRIDE, + CAP_MAC_ADMIN, + CAP_SYSLOG, + CAP_WAKE_ALARM, + CAP_BLOCK_SUSPEND, + CAP_AUDIT_READ, + CAP_PERFMON, + CAP_BPF, + CAP_CHECKPOINT_RESTORE, + } +} diff --git a/vendor/github.com/moby/sys/capability/syscall_linux.go b/vendor/github.com/moby/sys/capability/syscall_linux.go new file mode 100644 index 000000000..d6b6932a9 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/syscall_linux.go @@ -0,0 +1,153 @@ +// Copyright 2024 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package capability + +import ( + "syscall" + "unsafe" +) + +type capHeader struct { + version uint32 + pid int32 +} + +type capData struct { + effective uint32 + permitted uint32 + inheritable uint32 +} + +func capget(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func capset(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +// not yet in syscall +const ( + pr_CAP_AMBIENT = 47 + pr_CAP_AMBIENT_IS_SET = uintptr(1) + pr_CAP_AMBIENT_RAISE = uintptr(2) + pr_CAP_AMBIENT_LOWER = uintptr(3) + pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4) +) + +func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { + _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) + if e1 != 0 { + err = e1 + } + return +} + +const ( + vfsXattrName = "security.capability" + + vfsCapVerMask = 0xff000000 + vfsCapVer1 = 0x01000000 + vfsCapVer2 = 0x02000000 + + vfsCapFlagMask = ^vfsCapVerMask + vfsCapFlageffective = 0x000001 + + vfscapDataSizeV1 = 4 * (1 + 2*1) + vfscapDataSizeV2 = 4 * (1 + 2*2) +) + +type vfscapData struct { + magic uint32 + data [2]struct { + permitted uint32 + inheritable uint32 + } + effective [2]uint32 + version int8 +} + +var _vfsXattrName *byte + +func init() { + _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName) +} + +func getVfsCap(path string, dest *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) + if e1 != 0 { + if e1 == syscall.ENODATA { + dest.version = 2 + return + } + err = e1 + } + switch dest.magic & vfsCapVerMask { + case vfsCapVer1: + dest.version = 1 + if r0 != vfscapDataSizeV1 { + return syscall.EINVAL + } + dest.data[1].permitted = 0 + dest.data[1].inheritable = 0 + case vfsCapVer2: + dest.version = 2 + if r0 != vfscapDataSizeV2 { + return syscall.EINVAL + } + default: + return syscall.EINVAL + } + if dest.magic&vfsCapFlageffective != 0 { + dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable + dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable + } else { + dest.effective[0] = 0 + dest.effective[1] = 0 + } + return +} + +func setVfsCap(path string, data *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var size uintptr + if data.version == 1 { + data.magic = vfsCapVer1 + size = vfscapDataSizeV1 + } else if data.version == 2 { + data.magic = vfsCapVer2 + if data.effective[0] != 0 || data.effective[1] != 0 { + data.magic |= vfsCapFlageffective + } + size = vfscapDataSizeV2 + } else { + return syscall.EINVAL + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go index a3c7340e3..278cdfb07 100644 --- a/vendor/github.com/moby/sys/sequential/sequential_unix.go +++ 
b/vendor/github.com/moby/sys/sequential/sequential_unix.go @@ -5,41 +5,22 @@ package sequential import "os" -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is an alias for [os.Create] on non-Windows platforms. func Create(name string) (*os.File, error) { return os.Create(name) } -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is an alias for [os.Open] on non-Windows platforms. func Open(name string) (*os.File, error) { return os.Open(name) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. +// OpenFile is an alias for [os.OpenFile] on non-Windows platforms. func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(name, flag, perm) } -// CreateTemp creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is an alias for [os.CreateTemp] on non-Windows platforms. func CreateTemp(dir, prefix string) (f *os.File, err error) { return os.CreateTemp(dir, prefix) } diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go index 3f7f0d83e..3500ecc68 100644 --- a/vendor/github.com/moby/sys/sequential/sequential_windows.go +++ b/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -5,48 +5,52 @@ import ( "path/filepath" "strconv" "sync" - "syscall" "time" "unsafe" "golang.org/x/sys/windows" ) -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is a copy of [os.Create], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Create(name string) (*os.File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) + return openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC) } -// Open opens the named file for reading. 
If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is a copy of [os.Open], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Open(name string) (*os.File, error) { - return OpenFile(name, os.O_RDONLY, 0) + return openFileSequential(name, windows.O_RDONLY) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. +// OpenFile is a copy of [os.OpenFile], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, err := openFileSequential(name, flag, 0) - if err == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: err} + return openFileSequential(name, flag) } -func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) +func openFileSequential(name string, flag int) (file *os.File, err error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: windows.ERROR_FILE_NOT_FOUND} + } + r, e := openSequential(name, flag|windows.O_CLOEXEC) if e != nil { - return nil, e + return nil, &os.PathError{Op: "open", Path: name, Err: e} } return os.NewFile(uintptr(r), name), nil } @@ -58,7 +62,7 @@ func makeInheritSa() *windows.SecurityAttributes { return &sa } -func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { +func openSequential(path string, mode int) (fd windows.Handle, err error) { if len(path) == 0 { return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND } @@ -101,15 +105,16 @@ func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err err createmode = windows.OPEN_EXISTING } // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
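On the Unix side the helpers above are plain aliases for the os functions, while on Windows they swap FILE_ATTRIBUTE_NORMAL for FILE_FLAG_SEQUENTIAL_SCAN; callers stay platform-neutral either way. A minimal sketch of a call site (the file path is arbitrary; nothing here comes from the diff itself):

    package main

    import (
    	"fmt"
    	"io"
    	"os"

    	"github.com/moby/sys/sequential"
    )

    func main() {
    	// On Linux this is exactly os.Open; on Windows the handle is opened
    	// with FILE_FLAG_SEQUENTIAL_SCAN so the cache manager favors read-ahead.
    	f, err := sequential.Open(os.Args[0])
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	n, err := io.Copy(io.Discard, f)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("read", n, "bytes sequentially")
    }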
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, windows.FILE_FLAG_SEQUENTIAL_SCAN, 0) return h, e } // Helpers for CreateTemp -var rand uint32 -var randmu sync.Mutex +var ( + rand uint32 + randmu sync.Mutex +) func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -127,17 +132,13 @@ func nextSuffix() string { return strconv.Itoa(int(1e9 + r%1e9))[1:] } -// CreateTemp is a copy of os.CreateTemp, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is a copy of [os.CreateTemp], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func CreateTemp(dir, prefix string) (f *os.File, err error) { if dir == "" { dir = os.TempDir() @@ -146,7 +147,7 @@ func CreateTemp(dir, prefix string) (f *os.File, err error) { nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + f, err = openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_EXCL) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() diff --git a/vendor/github.com/moby/sys/userns/LICENSE b/vendor/github.com/moby/sys/userns/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/moby/sys/userns/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/sys/userns/userns.go b/vendor/github.com/moby/sys/userns/userns.go new file mode 100644 index 000000000..56b24c44a --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns.go @@ -0,0 +1,16 @@ +// Package userns provides utilities to detect whether we are currently running +// in a Linux user namespace. 
+// +// This code was migrated from [libcontainer/runc], which based its implementation +// on code from [lcx/incus]. +// +// [libcontainer/runc]: https://github.com/opencontainers/runc/blob/3778ae603c706494fd1e2c2faf83b406e38d687d/libcontainer/userns/userns_linux.go#L12-L49 +// [lcx/incus]: https://github.com/lxc/incus/blob/e45085dd42f826b3c8c3228e9733c0b6f998eafe/shared/util.go#L678-L700 +package userns + +// RunningInUserNS detects whether we are currently running in a Linux +// user namespace and memoizes the result. It returns false on non-Linux +// platforms. +func RunningInUserNS() bool { + return inUserNS() +} diff --git a/vendor/github.com/moby/sys/userns/userns_linux.go b/vendor/github.com/moby/sys/userns/userns_linux.go new file mode 100644 index 000000000..87c1c38ee --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns_linux.go @@ -0,0 +1,53 @@ +package userns + +import ( + "bufio" + "fmt" + "os" + "sync" +) + +var inUserNS = sync.OnceValue(runningInUserNS) + +// runningInUserNS detects whether we are currently running in a user namespace. +// +// This code was migrated from [libcontainer/runc] and based on an implementation +// from [lcx/incus]. +// +// [libcontainer/runc]: https://github.com/opencontainers/runc/blob/3778ae603c706494fd1e2c2faf83b406e38d687d/libcontainer/userns/userns_linux.go#L12-L49 +// [lcx/incus]: https://github.com/lxc/incus/blob/e45085dd42f826b3c8c3228e9733c0b6f998eafe/shared/util.go#L678-L700 +func runningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported. + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + return uidMapInUserNS(string(l)) +} + +func uidMapInUserNS(uidMap string) bool { + if uidMap == "" { + // File exist but empty (the initial state when userns is created, + // see user_namespaces(7)). + return true + } + + var a, b, c int64 + if _, err := fmt.Sscanf(uidMap, "%d %d %d", &a, &b, &c); err != nil { + // Assume we are in a regular, non user namespace. + return false + } + + // As per user_namespaces(7), /proc/self/uid_map of + // the initial user namespace shows 0 0 4294967295. + initNS := a == 0 && b == 0 && c == 4294967295 + return !initNS +} diff --git a/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go b/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go new file mode 100644 index 000000000..26ba2e16e --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go @@ -0,0 +1,8 @@ +//go:build linux && gofuzz + +package userns + +func FuzzUIDMap(uidmap []byte) int { + _ = uidMapInUserNS(string(uidmap)) + return 1 +} diff --git a/vendor/github.com/moby/sys/userns/userns_unsupported.go b/vendor/github.com/moby/sys/userns/userns_unsupported.go new file mode 100644 index 000000000..8ed83072c --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns_unsupported.go @@ -0,0 +1,6 @@ +//go:build !linux + +package userns + +// inUserNS is a stub for non-Linux systems. Always returns false. 
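The detection above boils down to comparing /proc/self/uid_map against the identity mapping of the initial namespace. A standalone sketch of the same parsing on representative inputs (the sample lines are illustrative, not taken from the diff):

    package main

    import "fmt"

    func main() {
    	// Per user_namespaces(7), the initial namespace maps "0 0 4294967295";
    	// an empty file means a freshly created, not-yet-mapped user namespace.
    	for _, uidMap := range []string{
    		"         0          0 4294967295", // initial namespace -> false
    		"         0     100000      65536", // typical rootless mapping -> true
    		"",                                 // empty map -> true
    	} {
    		inUserNS := true
    		if uidMap != "" {
    			var a, b, c int64
    			if _, err := fmt.Sscanf(uidMap, "%d %d %d", &a, &b, &c); err != nil {
    				inUserNS = false // unparsable: assume a regular namespace
    			} else {
    				inUserNS = !(a == 0 && b == 0 && c == 4294967295)
    			}
    		}
    		fmt.Printf("%q -> %v\n", uidMap, inUserNS)
    	}
    }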
+func inUserNS() bool { return false } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go index f61a56015..e49e6d53f 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go @@ -120,10 +120,7 @@ func Relabel(path string, fileLabel string, shared bool) error { c["level"] = "s0" fileLabel = c.Get() } - if err := selinux.Chcon(path, fileLabel, true); err != nil { - return err - } - return nil + return selinux.Chcon(path, fileLabel, true) } // DisableSecOpt returns a security opt that can disable labeling diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go index f21c80c5a..1c260cb27 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go @@ -6,25 +6,25 @@ package label // InitLabels returns the process label and file labels to be used within // the container. A list of options can be passed into this function to alter // the labels. -func InitLabels(options []string) (string, string, error) { +func InitLabels([]string) (string, string, error) { return "", "", nil } // Deprecated: The GenLabels function is only to be used during the transition // to the official API. Use InitLabels(strings.Fields(options)) instead. -func GenLabels(options string) (string, string, error) { +func GenLabels(string) (string, string, error) { return "", "", nil } -func SetFileLabel(path string, fileLabel string) error { +func SetFileLabel(string, string) error { return nil } -func SetFileCreateLabel(fileLabel string) error { +func SetFileCreateLabel(string) error { return nil } -func Relabel(path string, fileLabel string, shared bool) error { +func Relabel(string, string, bool) error { return nil } @@ -35,16 +35,16 @@ func DisableSecOpt() []string { } // Validate checks that the label does not include unexpected options -func Validate(label string) error { +func Validate(string) error { return nil } // RelabelNeeded checks whether the user requested a relabel -func RelabelNeeded(label string) bool { +func RelabelNeeded(string) bool { return false } // IsShared checks that the label includes a "shared" mark -func IsShared(label string) bool { +func IsShared(string) bool { return false } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index f1e95977d..c80c10971 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -132,7 +132,7 @@ func verifySELinuxfsMount(mnt string) bool { if err == nil { break } - if err == unix.EAGAIN || err == unix.EINTR { //nolint:errorlint // unix errors are bare + if err == unix.EAGAIN || err == unix.EINTR { continue } return false @@ -263,7 +263,7 @@ func isProcHandle(fh *os.File) error { if err == nil { break } - if err != unix.EINTR { //nolint:errorlint // unix errors are bare + if err != unix.EINTR { return &os.PathError{Op: "fstatfs", Path: fh.Name(), Err: err} } } @@ -328,8 +328,8 @@ func lSetFileLabel(fpath string, label string) error { if err == nil { break } - if err != unix.EINTR { //nolint:errorlint // unix errors are bare - return &os.PathError{Op: 
"lsetxattr", Path: fpath, Err: err} + if err != unix.EINTR { + return &os.PathError{Op: fmt.Sprintf("lsetxattr(label=%s)", label), Path: fpath, Err: err} } } @@ -347,8 +347,8 @@ func setFileLabel(fpath string, label string) error { if err == nil { break } - if err != unix.EINTR { //nolint:errorlint // unix errors are bare - return &os.PathError{Op: "setxattr", Path: fpath, Err: err} + if err != unix.EINTR { + return &os.PathError{Op: fmt.Sprintf("setxattr(label=%s)", label), Path: fpath, Err: err} } } @@ -639,6 +639,7 @@ func (m mlsRange) String() string { return low + "-" + high } +// TODO: remove min and max once Go < 1.21 is not supported. func max(a, b uint) uint { if a > b { return a @@ -1134,7 +1135,7 @@ func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity } return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { if fastMode { - if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + if cLabel, err := lFileLabel(p); err == nil && cLabel == label { return nil } } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index bc3fd3b37..0889fbe0e 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -7,7 +7,7 @@ func attrPath(string) string { return "" } -func readCon(fpath string) (string, error) { +func readCon(string) (string, error) { return "", nil } @@ -21,27 +21,27 @@ func getEnabled() bool { return false } -func classIndex(class string) (int, error) { +func classIndex(string) (int, error) { return -1, nil } -func setFileLabel(fpath string, label string) error { +func setFileLabel(string, string) error { return nil } -func lSetFileLabel(fpath string, label string) error { +func lSetFileLabel(string, string) error { return nil } -func fileLabel(fpath string) (string, error) { +func fileLabel(string) (string, error) { return "", nil } -func lFileLabel(fpath string) (string, error) { +func lFileLabel(string) (string, error) { return "", nil } -func setFSCreateLabel(label string) error { +func setFSCreateLabel(string) error { return nil } @@ -53,7 +53,7 @@ func currentLabel() (string, error) { return "", nil } -func pidLabel(pid int) (string, error) { +func pidLabel(int) (string, error) { return "", nil } @@ -61,23 +61,23 @@ func execLabel() (string, error) { return "", nil } -func canonicalizeContext(val string) (string, error) { +func canonicalizeContext(string) (string, error) { return "", nil } -func computeCreateContext(source string, target string, class string) (string, error) { +func computeCreateContext(string, string, string) (string, error) { return "", nil } -func calculateGlbLub(sourceRange, targetRange string) (string, error) { +func calculateGlbLub(string, string) (string, error) { return "", nil } -func peerLabel(fd uintptr) (string, error) { +func peerLabel(uintptr) (string, error) { return "", nil } -func setKeyLabel(label string) error { +func setKeyLabel(string) error { return nil } @@ -85,14 +85,14 @@ func (c Context) get() string { return "" } -func newContext(label string) (Context, error) { +func newContext(string) (Context, error) { return Context{}, nil } func clearLabels() { } -func reserveLabel(label string) { +func reserveLabel(string) { } func isMLSEnabled() bool { @@ -103,7 +103,7 @@ func enforceMode() int { return Disabled } -func setEnforceMode(mode int) error { +func setEnforceMode(int) error { 
return nil } @@ -111,7 +111,7 @@ func defaultEnforceMode() int { return Disabled } -func releaseLabel(label string) { +func releaseLabel(string) { } func roFileLabel() string { @@ -126,27 +126,27 @@ func initContainerLabels() (string, string) { return "", "" } -func containerLabels() (processLabel string, fileLabel string) { +func containerLabels() (string, string) { return "", "" } -func securityCheckContext(val string) error { +func securityCheckContext(string) error { return nil } -func copyLevel(src, dest string) (string, error) { +func copyLevel(string, string) (string, error) { return "", nil } -func chcon(fpath string, label string, recurse bool) error { +func chcon(string, string, bool) error { return nil } -func dupSecOpt(src string) ([]string, error) { +func dupSecOpt(string) ([]string, error) { return nil, nil } -func getDefaultContextWithLevel(user, level, scon string) (string, error) { +func getDefaultContextWithLevel(string, string, string) (string, error) { return "", nil } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go index 9e473ca16..559c85107 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go @@ -31,7 +31,7 @@ func lgetxattr(path, attr string) ([]byte, error) { func doLgetxattr(path, attr string, dest []byte) (int, error) { for { sz, err := unix.Lgetxattr(path, attr, dest) - if err != unix.EINTR { //nolint:errorlint // unix errors are bare + if err != unix.EINTR { return sz, err } } @@ -64,7 +64,7 @@ func getxattr(path, attr string) ([]byte, error) { func dogetxattr(path, attr string, dest []byte) (int, error) { for { sz, err := unix.Getxattr(path, attr, dest) - if err != unix.EINTR { //nolint:errorlint // unix errors are bare + if err != unix.EINTR { return sz, err } } diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md index 068ac4005..b827e7dd7 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md @@ -28,7 +28,9 @@ Please note the following limitations of this code: * fs.SkipDir is not supported; - * no errors are ever passed to WalkDirFunc; + * ErrNotExist errors from filepath.WalkDir are silently ignored for any path + except the top directory (WalkDir argument); any other error is returned to + the caller of WalkDir; * once any error is returned from any walkDirFunc instance, no more calls to WalkDirFunc are made, and the error is returned to the caller of WalkDir; @@ -51,4 +53,4 @@ filepath.WalkDir. Otherwise (if a WalkDirFunc is actually doing something) this is usually faster, except when the WalkDirN(..., 1) is used. Run `go test -bench .` to see how different operations can benefit from it, as well as how the -level of paralellism affects the speed. +level of parallelism affects the speed. 
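The README's benchmark claims are easiest to read next to a call site. A minimal sketch using the WalkN entry point vendored below; the walked path and the concurrency level are arbitrary, and the callback must be safe for concurrent use because up to num goroutines invoke it at once:

    package main

    import (
    	"fmt"
    	"io/fs"
    	"sync/atomic"

    	"github.com/opencontainers/selinux/pkg/pwalkdir"
    )

    func main() {
    	var files atomic.Uint64
    	// Unlike filepath.WalkDir, the function runs in parallel and
    	// fs.SkipDir is not supported.
    	err := pwalkdir.WalkN("/etc", func(path string, d fs.DirEntry, _ error) error {
    		if !d.IsDir() {
    			files.Add(1)
    		}
    		return nil
    	}, 8)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("files:", files.Load())
    }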
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go index 0f5d9f580..5d2d09a29 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go @@ -4,6 +4,7 @@ package pwalkdir import ( + "errors" "fmt" "io/fs" "path/filepath" @@ -60,6 +61,12 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error { go func() { err = filepath.WalkDir(root, func(p string, entry fs.DirEntry, err error) error { if err != nil { + // Walking a file tree can race with removal, + // so ignore ENOENT, except for root. + // https://github.com/opencontainers/selinux/issues/199. + if errors.Is(err, fs.ErrNotExist) && len(p) != rootLen { + return nil + } close(files) return err } diff --git a/vendor/github.com/openshift/imagebuilder/.travis.yml b/vendor/github.com/openshift/imagebuilder/.travis.yml index b2abfa40a..da9138bb8 100644 --- a/vendor/github.com/openshift/imagebuilder/.travis.yml +++ b/vendor/github.com/openshift/imagebuilder/.travis.yml @@ -6,7 +6,7 @@ services: - docker go: - - "1.20" + - "1.21.13" before_install: - sudo systemctl stop docker.service && sudo systemctl stop docker.socket diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go index 6e4f242ce..2a4f45433 100644 --- a/vendor/github.com/openshift/imagebuilder/dispatchers.go +++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go @@ -18,9 +18,10 @@ import ( docker "github.com/fsouza/go-dockerclient" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/platforms" + "github.com/containerd/errdefs" + "github.com/containerd/platforms" "github.com/containers/storage/pkg/regexp" + "github.com/openshift/imagebuilder/internal" "github.com/openshift/imagebuilder/signal" "github.com/openshift/imagebuilder/strslice" @@ -143,7 +144,7 @@ func processHereDocs(instruction, originalInstruction string, heredocs []buildki shlex := buildkitshell.NewLex('\\') shlex.RawQuotes = true shlex.RawEscapes = true - content, err = shlex.ProcessWord(content, args) + content, _, err = shlex.ProcessWord(content, internal.EnvironmentSlice(args)) if err != nil { return nil, err } diff --git a/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go b/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go index f8cb979a8..5e4f4a04b 100644 --- a/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go +++ b/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go @@ -18,6 +18,7 @@ import ( buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser" buildkitshell "github.com/moby/buildkit/frontend/dockerfile/shell" "github.com/openshift/imagebuilder/dockerfile/command" + "github.com/openshift/imagebuilder/internal" ) // Node is a structure used to represent a parse tree. 
@@ -408,7 +409,7 @@ func heredocsFromLine(line string) ([]buildkitparser.Heredoc, error) { shlex.RawQuotes = true shlex.RawEscapes = true shlex.SkipUnsetEnv = true - words, _ := shlex.ProcessWords(line, []string{}) + words, _ := shlex.ProcessWords(line, internal.EnvironmentSlice([]string{})) var docs []buildkitparser.Heredoc for _, word := range words { diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec index 9fc2bc102..d388aa912 100644 --- a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec +++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec @@ -12,7 +12,7 @@ # %global golang_version 1.19 -%{!?version: %global version 1.2.14} +%{!?version: %global version 1.2.15} %{!?release: %global release 1} %global package_name imagebuilder %global product_name Container Image Builder diff --git a/vendor/github.com/openshift/imagebuilder/internal/env.go b/vendor/github.com/openshift/imagebuilder/internal/env.go new file mode 100644 index 000000000..d72968dee --- /dev/null +++ b/vendor/github.com/openshift/imagebuilder/internal/env.go @@ -0,0 +1,23 @@ +package internal + +import "strings" + +type EnvironmentSlice []string + +func (e EnvironmentSlice) Keys() []string { + keys := make([]string, 0, len(e)) + for _, kv := range e { + k, _, _ := strings.Cut(kv, "=") + keys = append(keys, k) + } + return keys +} + +func (e EnvironmentSlice) Get(key string) (string, bool) { + for _, kv := range e { + if k, v, ok := strings.Cut(kv, "="); ok && k == key { + return v, true + } + } + return "", false +} diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go index 758cd4ff9..74ac03b78 100644 --- a/vendor/github.com/pkg/sftp/attrs.go +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -32,10 +32,10 @@ func (fi *fileInfo) Name() string { return fi.name } func (fi *fileInfo) Size() int64 { return int64(fi.stat.Size) } // Mode returns file mode bits. -func (fi *fileInfo) Mode() os.FileMode { return toFileMode(fi.stat.Mode) } +func (fi *fileInfo) Mode() os.FileMode { return fi.stat.FileMode() } // ModTime returns the last modification time of the file. -func (fi *fileInfo) ModTime() time.Time { return time.Unix(int64(fi.stat.Mtime), 0) } +func (fi *fileInfo) ModTime() time.Time { return fi.stat.ModTime() } // IsDir returns true if the file is a directory. func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } @@ -56,6 +56,21 @@ type FileStat struct { Extended []StatExtended } +// ModTime returns the Mtime SFTP file attribute converted to a time.Time +func (fs *FileStat) ModTime() time.Time { + return time.Unix(int64(fs.Mtime), 0) +} + +// AccessTime returns the Atime SFTP file attribute converted to a time.Time +func (fs *FileStat) AccessTime() time.Time { + return time.Unix(int64(fs.Atime), 0) +} + +// FileMode returns the Mode SFTP file attribute converted to an os.FileMode +func (fs *FileStat) FileMode() os.FileMode { + return toFileMode(fs.Mode) +} + // StatExtended contains additional, extended information for a FileStat. 
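The new internal.EnvironmentSlice above gives buildkit's shell lexer keyed access to KEY=VALUE pairs. Go internal packages cannot be imported from outside their module, so this sketch inlines an equivalent type to show the split semantics (only the first '=' separates key from value):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // envSlice mirrors the vendored internal.EnvironmentSlice.
    type envSlice []string

    func (e envSlice) Get(key string) (string, bool) {
    	for _, kv := range e {
    		if k, v, ok := strings.Cut(kv, "="); ok && k == key {
    			return v, true
    		}
    	}
    	return "", false
    }

    func main() {
    	env := envSlice{"PATH=/usr/bin", "FOO=bar=baz"}
    	v, ok := env.Get("FOO")
    	fmt.Println(v, ok) // "bar=baz" true
    }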
type StatExtended struct { ExtType string diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go index 371ae9b9b..96ffc03df 100644 --- a/vendor/github.com/pkg/sftp/attrs_unix.go +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -1,5 +1,5 @@ -//go:build darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || aix || js -// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js +//go:build darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || aix || js || zos +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js zos package sftp diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go index 0df125e15..83047d0a3 100644 --- a/vendor/github.com/pkg/sftp/client.go +++ b/vendor/github.com/pkg/sftp/client.go @@ -2,6 +2,7 @@ package sftp import ( "bytes" + "context" "encoding/binary" "errors" "fmt" @@ -256,7 +257,7 @@ func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Clie // read/write at the same time. For those services you will need to use // `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`. func (c *Client) Create(path string) (*File, error) { - return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) + return c.open(path, toPflags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) } const sftpProtocolVersion = 3 // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt @@ -321,19 +322,27 @@ func (c *Client) Walk(root string) *fs.Walker { return fs.WalkFS(root, c) } -// ReadDir reads the directory named by dirname and returns a list of -// directory entries. +// ReadDir reads the directory named by p +// and returns a list of directory entries. func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { - handle, err := c.opendir(p) + return c.ReadDirContext(context.Background(), p) +} + +// ReadDirContext reads the directory named by p +// and returns a list of directory entries. +// The passed context can be used to cancel the operation +// returning all entries listed up to the cancellation. +func (c *Client) ReadDirContext(ctx context.Context, p string) ([]os.FileInfo, error) { + handle, err := c.opendir(ctx, p) if err != nil { return nil, err } defer c.close(handle) // this has to defer earlier than the lock below - var attrs []os.FileInfo + var entries []os.FileInfo var done = false for !done { id := c.nextID() - typ, data, err1 := c.sendPacket(nil, &sshFxpReaddirPacket{ + typ, data, err1 := c.sendPacket(ctx, nil, &sshFxpReaddirPacket{ ID: id, Handle: handle, }) @@ -354,11 +363,14 @@ func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { filename, data = unmarshalString(data) _, data = unmarshalString(data) // discard longname var attr *FileStat - attr, data = unmarshalAttrs(data) + attr, data, err = unmarshalAttrs(data) + if err != nil { + return nil, err + } if filename == "." || filename == ".." { continue } - attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) + entries = append(entries, fileInfoFromStat(attr, path.Base(filename))) } case sshFxpStatus: // TODO(dfc) scope warning! 
@@ -371,12 +383,12 @@ func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { if err == io.EOF { err = nil } - return attrs, err + return entries, err } -func (c *Client) opendir(path string) (string, error) { +func (c *Client) opendir(ctx context.Context, path string) (string, error) { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpOpendirPacket{ + typ, data, err := c.sendPacket(ctx, nil, &sshFxpOpendirPacket{ ID: id, Path: path, }) @@ -412,7 +424,7 @@ func (c *Client) Stat(p string) (os.FileInfo, error) { // If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. func (c *Client) Lstat(p string) (os.FileInfo, error) { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpLstatPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpLstatPacket{ ID: id, Path: p, }) @@ -425,7 +437,11 @@ func (c *Client) Lstat(p string) (os.FileInfo, error) { if sid != id { return nil, &unexpectedIDErr{id, sid} } - attr, _ := unmarshalAttrs(data) + attr, _, err := unmarshalAttrs(data) + if err != nil { + // avoid returning a valid value from fileInfoFromStats if err != nil. + return nil, err + } return fileInfoFromStat(attr, path.Base(p)), nil case sshFxpStatus: return nil, normaliseError(unmarshalStatus(id, data)) @@ -437,7 +453,7 @@ func (c *Client) Lstat(p string) (os.FileInfo, error) { // ReadLink reads the target of a symbolic link. func (c *Client) ReadLink(p string) (string, error) { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpReadlinkPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpReadlinkPacket{ ID: id, Path: p, }) @@ -466,7 +482,7 @@ func (c *Client) ReadLink(p string) (string, error) { // Link creates a hard link at 'newname', pointing at the same inode as 'oldname' func (c *Client) Link(oldname, newname string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpHardlinkPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpHardlinkPacket{ ID: id, Oldpath: oldname, Newpath: newname, @@ -485,7 +501,7 @@ func (c *Client) Link(oldname, newname string) error { // Symlink creates a symbolic link at 'newname', pointing at target 'oldname' func (c *Client) Symlink(oldname, newname string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpSymlinkPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpSymlinkPacket{ ID: id, Linkpath: newname, Targetpath: oldname, @@ -501,9 +517,9 @@ func (c *Client) Symlink(oldname, newname string) error { } } -func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error { +func (c *Client) fsetstat(handle string, flags uint32, attrs interface{}) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpFsetstatPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpFsetstatPacket{ ID: id, Handle: handle, Flags: flags, @@ -523,7 +539,7 @@ func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error // setstat is a convience wrapper to allow for changing of various parts of the file descriptor. 
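// ReadDirContext above is the first cancellable call in the client API, and
// a cancelled listing still returns everything read before cancellation. A
// sketch of bounding a slow listing; the established *ssh.Client is an
// assumption of the example:

package example

import (
	"context"
	"errors"
	"os"
	"time"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func listWithTimeout(conn *ssh.Client, dir string) ([]os.FileInfo, error) {
	client, err := sftp.NewClient(conn)
	if err != nil {
		return nil, err
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	entries, err := client.ReadDirContext(ctx, dir)
	if errors.Is(err, context.DeadlineExceeded) {
		// partial but usable: the entries listed before the deadline
		return entries, nil
	}
	return entries, err
}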
func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpSetstatPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpSetstatPacket{ ID: id, Path: path, Flags: flags, @@ -577,23 +593,37 @@ func (c *Client) Truncate(path string, size int64) error { return c.setstat(path, sshFileXferAttrSize, uint64(size)) } +// SetExtendedData sets extended attributes of the named file. It uses the +// SSH_FILEXFER_ATTR_EXTENDED flag in the setstat request. +// +// This flag provides a general extension mechanism for vendor-specific extensions. +// Names of the attributes should be a string of the format "name@domain", where "domain" +// is a valid, registered domain name and "name" identifies the method. Server +// implementations SHOULD ignore extended data fields that they do not understand. +func (c *Client) SetExtendedData(path string, extended []StatExtended) error { + attrs := &FileStat{ + Extended: extended, + } + return c.setstat(path, sshFileXferAttrExtended, attrs) +} + // Open opens the named file for reading. If successful, methods on the // returned file can be used for reading; the associated file descriptor // has mode O_RDONLY. func (c *Client) Open(path string) (*File, error) { - return c.open(path, flags(os.O_RDONLY)) + return c.open(path, toPflags(os.O_RDONLY)) } // OpenFile is the generalized open call; most users will use Open or // Create instead. It opens the named file with specified flag (O_RDONLY // etc.). If successful, methods on the returned File can be used for I/O. func (c *Client) OpenFile(path string, f int) (*File, error) { - return c.open(path, flags(f)) + return c.open(path, toPflags(f)) } func (c *Client) open(path string, pflags uint32) (*File, error) { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpOpenPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpOpenPacket{ ID: id, Path: path, Pflags: pflags, @@ -621,7 +651,7 @@ func (c *Client) open(path string, pflags uint32) (*File, error) { // immediately after this request has been sent. 
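// SetExtendedData above exposes SSH_FILEXFER_ATTR_EXTENDED via setstat. A
// sketch with a made-up "name@domain" attribute; per the doc comment,
// servers that do not understand it should simply ignore it:

package example

import "github.com/pkg/sftp"

// setChecksum attaches a vendor-specific attribute to a remote path.
// "checksum@example.com" is hypothetical, not a registered extension.
func setChecksum(client *sftp.Client, path, sum string) error {
	return client.SetExtendedData(path, []sftp.StatExtended{
		{ExtType: "checksum@example.com", ExtData: sum},
	})
}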
func (c *Client) close(handle string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpClosePacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpClosePacket{ ID: id, Handle: handle, }) @@ -638,7 +668,7 @@ func (c *Client) close(handle string) error { func (c *Client) stat(path string) (*FileStat, error) { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpStatPacket{ ID: id, Path: path, }) @@ -651,8 +681,8 @@ func (c *Client) stat(path string) (*FileStat, error) { if sid != id { return nil, &unexpectedIDErr{id, sid} } - attr, _ := unmarshalAttrs(data) - return attr, nil + attr, _, err := unmarshalAttrs(data) + return attr, err case sshFxpStatus: return nil, normaliseError(unmarshalStatus(id, data)) default: @@ -662,7 +692,7 @@ func (c *Client) stat(path string) (*FileStat, error) { func (c *Client) fstat(handle string) (*FileStat, error) { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpFstatPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpFstatPacket{ ID: id, Handle: handle, }) @@ -675,8 +705,8 @@ func (c *Client) fstat(handle string) (*FileStat, error) { if sid != id { return nil, &unexpectedIDErr{id, sid} } - attr, _ := unmarshalAttrs(data) - return attr, nil + attr, _, err := unmarshalAttrs(data) + return attr, err case sshFxpStatus: return nil, normaliseError(unmarshalStatus(id, data)) default: @@ -691,7 +721,7 @@ func (c *Client) fstat(handle string) (*FileStat, error) { func (c *Client) StatVFS(path string) (*StatVFS, error) { // send the StatVFS packet to the server id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpStatvfsPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpStatvfsPacket{ ID: id, Path: path, }) @@ -746,7 +776,7 @@ func (c *Client) Remove(path string) error { func (c *Client) removeFile(path string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRemovePacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpRemovePacket{ ID: id, Filename: path, }) @@ -764,7 +794,7 @@ func (c *Client) removeFile(path string) error { // RemoveDirectory removes a directory path. func (c *Client) RemoveDirectory(path string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRmdirPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpRmdirPacket{ ID: id, Path: path, }) @@ -782,7 +812,7 @@ func (c *Client) RemoveDirectory(path string) error { // Rename renames a file. func (c *Client) Rename(oldname, newname string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRenamePacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpRenamePacket{ ID: id, Oldpath: oldname, Newpath: newname, @@ -802,7 +832,7 @@ func (c *Client) Rename(oldname, newname string) error { // which will replace newname if it already exists. func (c *Client) PosixRename(oldname, newname string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpPosixRenamePacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpPosixRenamePacket{ ID: id, Oldpath: oldname, Newpath: newname, @@ -824,7 +854,7 @@ func (c *Client) PosixRename(oldname, newname string) error { // or relative pathnames without a leading slash into absolute paths. 
func (c *Client) RealPath(path string) (string, error) { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRealpathPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpRealpathPacket{ ID: id, Path: path, }) @@ -861,7 +891,7 @@ func (c *Client) Getwd() (string, error) { // parent folder does not exist (the method cannot create complete paths). func (c *Client) Mkdir(path string) error { id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpMkdirPacket{ + typ, data, err := c.sendPacket(context.Background(), nil, &sshFxpMkdirPacket{ ID: id, Path: path, }) @@ -965,18 +995,34 @@ func (c *Client) RemoveAll(path string) error { // File represents a remote file. type File struct { - c *Client - path string - handle string + c *Client + path string - mu sync.Mutex + mu sync.RWMutex + handle string offset int64 // current offset within remote file } // Close closes the File, rendering it unusable for I/O. It returns an // error, if any. func (f *File) Close() error { - return f.c.close(f.handle) + f.mu.Lock() + defer f.mu.Unlock() + + if f.handle == "" { + return os.ErrClosed + } + + // The design principle here is that when `openssh-portable/sftp-server.c` is doing `handle_close`, + // it will unconditionally mark the handle as unused, + // so we need to also unconditionally mark this handle as invalid. + // By invalidating our local copy of the handle, + // we ensure that there cannot be any erroneous use-after-close requests sent after Close. + + handle := f.handle + f.handle = "" + + return f.c.close(handle) } // Name returns the name of the file as presented to Open or Create. @@ -997,7 +1043,7 @@ func (f *File) Read(b []byte) (int, error) { f.mu.Lock() defer f.mu.Unlock() - n, err := f.ReadAt(b, f.offset) + n, err := f.readAt(b, f.offset) f.offset += int64(n) return n, err } @@ -1007,7 +1053,7 @@ func (f *File) Read(b []byte) (int, error) { func (f *File) readChunkAt(ch chan result, b []byte, off int64) (n int, err error) { for err == nil && n < len(b) { id := f.c.nextID() - typ, data, err := f.c.sendPacket(ch, &sshFxpReadPacket{ + typ, data, err := f.c.sendPacket(context.Background(), ch, &sshFxpReadPacket{ ID: id, Handle: f.handle, Offset: uint64(off) + uint64(n), @@ -1062,6 +1108,19 @@ func (f *File) readAtSequential(b []byte, off int64) (read int, err error) { // the number of bytes read and an error, if any. ReadAt follows io.ReaderAt semantics, // so the file offset is not altered during the read. func (f *File) ReadAt(b []byte, off int64) (int, error) { + f.mu.RLock() + defer f.mu.RUnlock() + + return f.readAt(b, off) +} + +// readAt must be called while holding either the Read or Write mutex in File. +// This code is concurrent safe with itself, but not with Close. +func (f *File) readAt(b []byte, off int64) (int, error) { + if f.handle == "" { + return 0, os.ErrClosed + } + if len(b) <= f.c.maxPacket { // This should be able to be serviced with 1/2 requests. // So, just do it directly. @@ -1179,7 +1238,9 @@ func (f *File) ReadAt(b []byte, off int64) (int, error) { if err != nil { // return the offset as the start + how much we read before the error. errCh <- rErr{packet.off + int64(n), err} - return + + // DO NOT return. + // We want to ensure that workCh is drained before wg.Wait returns. 
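// The "DO NOT return" comments above all encode the same worker-pool rule:
// a worker that exits on the first error stops draining workCh, the
// producer blocks on its next send, and wg.Wait deadlocks. A generic
// sketch of the drain-on-error shape (not the vendored pool):

package main

import (
	"errors"
	"fmt"
	"sync"
)

func run(jobs []int) error {
	workCh := make(chan int)
	errCh := make(chan error, 1)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range workCh {
				if err := work(j); err != nil {
					select {
					case errCh <- err: // keep the first error
					default:
					}
					// no return here: keep ranging so workCh is
					// drained and the producer never blocks
				}
			}
		}()
	}

	for _, j := range jobs {
		workCh <- j
	}
	close(workCh)
	wg.Wait()

	select {
	case err := <-errCh:
		return err
	default:
		return nil
	}
}

func work(j int) error {
	if j < 0 {
		return errors.New("negative job")
	}
	return nil
}

func main() {
	fmt.Println(run([]int{1, -2, 3})) // negative job
}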
} } }() @@ -1258,6 +1319,10 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { f.mu.Lock() defer f.mu.Unlock() + if f.handle == "" { + return 0, os.ErrClosed + } + if f.c.disableConcurrentReads { return f.writeToSequential(w) } @@ -1405,12 +1470,10 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { select { case readWork.cur <- writeWork: case <-cancel: - return } - if err != nil { - return - } + // DO NOT return. + // We want to ensure that readCh is drained before wg.Wait returns. } }() } @@ -1450,6 +1513,17 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { // Stat returns the FileInfo structure describing file. If there is an // error. func (f *File) Stat() (os.FileInfo, error) { + f.mu.RLock() + defer f.mu.RUnlock() + + if f.handle == "" { + return nil, os.ErrClosed + } + + return f.stat() +} + +func (f *File) stat() (os.FileInfo, error) { fs, err := f.c.fstat(f.handle) if err != nil { return nil, err @@ -1469,13 +1543,17 @@ func (f *File) Write(b []byte) (int, error) { f.mu.Lock() defer f.mu.Unlock() - n, err := f.WriteAt(b, f.offset) + if f.handle == "" { + return 0, os.ErrClosed + } + + n, err := f.writeAt(b, f.offset) f.offset += int64(n) return n, err } func (f *File) writeChunkAt(ch chan result, b []byte, off int64) (int, error) { - typ, data, err := f.c.sendPacket(ch, &sshFxpWritePacket{ + typ, data, err := f.c.sendPacket(context.Background(), ch, &sshFxpWritePacket{ ID: f.c.nextID(), Handle: f.handle, Offset: uint64(off), @@ -1627,6 +1705,19 @@ func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { // the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics, // so the file offset is not altered during the write. func (f *File) WriteAt(b []byte, off int64) (written int, err error) { + f.mu.RLock() + defer f.mu.RUnlock() + + if f.handle == "" { + return 0, os.ErrClosed + } + + return f.writeAt(b, off) +} + +// writeAt must be called while holding either the Read or Write mutex in File. +// This code is concurrent safe with itself, but not with Close. +func (f *File) writeAt(b []byte, off int64) (written int, err error) { if len(b) <= f.c.maxPacket { // We can do this in one write. return f.writeChunkAt(nil, b, off) @@ -1665,7 +1756,21 @@ func (f *File) WriteAt(b []byte, off int64) (written int, err error) { // Giving a concurrency of less than one will default to the Client’s max concurrency. // // Otherwise, the given concurrency will be capped by the Client's max concurrency. +// +// When one needs to guarantee concurrent reads/writes, this method is preferred +// over ReadFrom. func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) { + f.mu.Lock() + defer f.mu.Unlock() + + return f.readFromWithConcurrency(r, concurrency) +} + +func (f *File) readFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) { + if f.handle == "" { + return 0, os.ErrClosed + } + // Split the write into multiple maxPacket sized concurrent writes. // This allows writes with a suitably large reader // to transfer data at a much faster rate due to overlapping round trip times. @@ -1757,6 +1862,9 @@ func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64 if err != nil { errCh <- rwErr{work.off, err} + + // DO NOT return. + // We want to ensure that workCh is drained before wg.Wait returns. 
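// With the handle cleared under the new mutex, the File methods above fail
// fast after Close instead of reusing a stale handle on the wire. A sketch
// of the observable behavior, assuming an already-open *sftp.File:

package example

import (
	"errors"
	"fmt"
	"os"

	"github.com/pkg/sftp"
)

func demoClosed(f *sftp.File) {
	_ = f.Close()

	buf := make([]byte, 16)
	_, err := f.ReadAt(buf, 0) // fails locally; no request is sent
	fmt.Println(errors.Is(err, os.ErrClosed)) // true

	err = f.Close() // a second Close also reports os.ErrClosed
	fmt.Println(errors.Is(err, os.ErrClosed)) // true
}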
} } }() @@ -1811,10 +1919,26 @@ func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64 // This method is preferred over calling Write multiple times // to maximise throughput for transferring the entire file, // especially over high-latency links. +// +// To ensure concurrent writes, the given r needs to implement one of +// the following receiver methods: +// +// Len() int +// Size() int64 +// Stat() (os.FileInfo, error) +// +// or be an instance of [io.LimitedReader] to determine the number of possible +// concurrent requests. Otherwise, reads/writes are performed sequentially. +// ReadFromWithConcurrency can be used explicitly to guarantee concurrent +// processing of the reader. func (f *File) ReadFrom(r io.Reader) (int64, error) { f.mu.Lock() defer f.mu.Unlock() + if f.handle == "" { + return 0, os.ErrClosed + } + if f.c.useConcurrentWrites { var remain int64 switch r := r.(type) { @@ -1836,7 +1960,7 @@ func (f *File) ReadFrom(r io.Reader) (int64, error) { if remain < 0 { // We can strongly assert that we want default max concurrency here. - return f.ReadFromWithConcurrency(r, f.c.maxConcurrentRequests) + return f.readFromWithConcurrency(r, f.c.maxConcurrentRequests) } if remain > int64(f.c.maxPacket) { @@ -1851,7 +1975,7 @@ func (f *File) ReadFrom(r io.Reader) (int64, error) { concurrency64 = int64(f.c.maxConcurrentRequests) } - return f.ReadFromWithConcurrency(r, int(concurrency64)) + return f.readFromWithConcurrency(r, int(concurrency64)) } } @@ -1894,12 +2018,16 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { f.mu.Lock() defer f.mu.Unlock() + if f.handle == "" { + return 0, os.ErrClosed + } + switch whence { case io.SeekStart: case io.SeekCurrent: offset += f.offset case io.SeekEnd: - fi, err := f.Stat() + fi, err := f.stat() if err != nil { return f.offset, err } @@ -1918,22 +2046,84 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { // Chown changes the uid/gid of the current file. func (f *File) Chown(uid, gid int) error { - return f.c.Chown(f.path, uid, gid) + f.mu.RLock() + defer f.mu.RUnlock() + + if f.handle == "" { + return os.ErrClosed + } + + return f.c.fsetstat(f.handle, sshFileXferAttrUIDGID, &FileStat{ + UID: uint32(uid), + GID: uint32(gid), + }) } // Chmod changes the permissions of the current file. // // See Client.Chmod for details. func (f *File) Chmod(mode os.FileMode) error { - return f.c.setfstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode)) + f.mu.RLock() + defer f.mu.RUnlock() + + if f.handle == "" { + return os.ErrClosed + } + + return f.c.fsetstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode)) +} + +// SetExtendedData sets extended attributes of the current file. It uses the +// SSH_FILEXFER_ATTR_EXTENDED flag in the setstat request. +// +// This flag provides a general extension mechanism for vendor-specific extensions. +// Names of the attributes should be a string of the format "name@domain", where "domain" +// is a valid, registered domain name and "name" identifies the method. Server +// implementations SHOULD ignore extended data fields that they do not understand. +func (f *File) SetExtendedData(path string, extended []StatExtended) error { + f.mu.RLock() + defer f.mu.RUnlock() + + if f.handle == "" { + return os.ErrClosed + } + + attrs := &FileStat{ + Extended: extended, + } + + return f.c.fsetstat(f.handle, sshFileXferAttrExtended, attrs) +} + +// Truncate sets the size of the current file. 
Although it may be safely assumed +// that if the size is less than its current size it will be truncated to fit, +// the SFTP protocol does not specify what behavior the server should do when setting +// size greater than the current size. +// We send a SSH_FXP_FSETSTAT here since we have a file handle +func (f *File) Truncate(size int64) error { + f.mu.RLock() + defer f.mu.RUnlock() + + if f.handle == "" { + return os.ErrClosed + } + + return f.c.fsetstat(f.handle, sshFileXferAttrSize, uint64(size)) } // Sync requests a flush of the contents of a File to stable storage. // // Sync requires the server to support the fsync@openssh.com extension. func (f *File) Sync() error { + f.mu.Lock() + defer f.mu.Unlock() + + if f.handle == "" { + return os.ErrClosed + } + id := f.c.nextID() - typ, data, err := f.c.sendPacket(nil, &sshFxpFsyncPacket{ + typ, data, err := f.c.sendPacket(context.Background(), nil, &sshFxpFsyncPacket{ ID: id, Handle: f.handle, }) @@ -1948,15 +2138,6 @@ func (f *File) Sync() error { } } -// Truncate sets the size of the current file. Although it may be safely assumed -// that if the size is less than its current size it will be truncated to fit, -// the SFTP protocol does not specify what behavior the server should do when setting -// size greater than the current size. -// We send a SSH_FXP_FSETSTAT here since we have a file handle -func (f *File) Truncate(size int64) error { - return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size)) -} - // normaliseError normalises an error into a more standard form that can be // checked against stdlib errors like io.EOF or os.ErrNotExist. func normaliseError(err error) error { @@ -1981,15 +2162,14 @@ func normaliseError(err error) error { // flags converts the flags passed to OpenFile into ssh flags. // Unsupported flags are ignored. -func flags(f int) uint32 { +func toPflags(f int) uint32 { var out uint32 - switch f & os.O_WRONLY { - case os.O_WRONLY: - out |= sshFxfWrite + switch f & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { case os.O_RDONLY: out |= sshFxfRead - } - if f&os.O_RDWR == os.O_RDWR { + case os.O_WRONLY: + out |= sshFxfWrite + case os.O_RDWR: out |= sshFxfRead | sshFxfWrite } if f&os.O_APPEND == os.O_APPEND { @@ -2013,7 +2193,7 @@ func flags(f int) uint32 { // setuid, setgid and sticky in m, because we've historically supported those // bits, and we mask off any non-permission bits. 
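// toPflags (formerly flags) above now switches on the whole POSIX access
// mode at once: O_RDONLY, O_WRONLY and O_RDWR are mutually exclusive values
// rather than independent bits, and O_RDONLY is 0, so bit-testing them was
// always fragile. The mapping in isolation (pflag values per
// draft-ietf-secsh-filexfer-02):

package main

import (
	"fmt"
	"os"
)

const (
	sshFxfRead  uint32 = 1 << 0
	sshFxfWrite uint32 = 1 << 1
)

func accessFlags(f int) uint32 {
	var out uint32
	switch f & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {
	case os.O_RDONLY:
		out = sshFxfRead
	case os.O_WRONLY:
		out = sshFxfWrite
	case os.O_RDWR:
		out = sshFxfRead | sshFxfWrite
	}
	return out
}

func main() {
	fmt.Printf("%02b\n", accessFlags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) // 11
}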
func toChmodPerm(m os.FileMode) (perm uint32) { - const mask = os.ModePerm | s_ISUID | s_ISGID | s_ISVTX + const mask = os.ModePerm | os.FileMode(s_ISUID|s_ISGID|s_ISVTX) perm = uint32(m & mask) if m&os.ModeSetuid != 0 { diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go index 3bb2ba15f..93bc37bf7 100644 --- a/vendor/github.com/pkg/sftp/conn.go +++ b/vendor/github.com/pkg/sftp/conn.go @@ -1,6 +1,7 @@ package sftp import ( + "context" "encoding" "fmt" "io" @@ -128,14 +129,19 @@ type idmarshaler interface { encoding.BinaryMarshaler } -func (c *clientConn) sendPacket(ch chan result, p idmarshaler) (byte, []byte, error) { +func (c *clientConn) sendPacket(ctx context.Context, ch chan result, p idmarshaler) (byte, []byte, error) { if cap(ch) < 1 { ch = make(chan result, 1) } c.dispatchRequest(ch, p) - s := <-ch - return s.typ, s.data, s.err + + select { + case <-ctx.Done(): + return 0, nil, ctx.Err() + case s := <-ch: + return s.typ, s.data, s.err + } } // dispatchRequest should ideally only be called by race-detection tests outside of this file, diff --git a/vendor/github.com/pkg/sftp/errno_plan9.go b/vendor/github.com/pkg/sftp/errno_plan9.go new file mode 100644 index 000000000..cf9d39029 --- /dev/null +++ b/vendor/github.com/pkg/sftp/errno_plan9.go @@ -0,0 +1,42 @@ +package sftp + +import ( + "os" + "syscall" +) + +var EBADF = syscall.NewError("fd out of range or not open") + +func wrapPathError(filepath string, err error) error { + if errno, ok := err.(syscall.ErrorString); ok { + return &os.PathError{Path: filepath, Err: errno} + } + return err +} + +// translateErrno translates a syscall error number to a SFTP error code. +func translateErrno(errno syscall.ErrorString) uint32 { + switch errno { + case "": + return sshFxOk + case syscall.ENOENT: + return sshFxNoSuchFile + case syscall.EPERM: + return sshFxPermissionDenied + } + + return sshFxFailure +} + +func translateSyscallError(err error) (uint32, bool) { + switch e := err.(type) { + case syscall.ErrorString: + return translateErrno(e), true + case *os.PathError: + debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) + if errno, ok := e.Err.(syscall.ErrorString); ok { + return translateErrno(errno), true + } + } + return 0, false +} diff --git a/vendor/github.com/pkg/sftp/errno_posix.go b/vendor/github.com/pkg/sftp/errno_posix.go new file mode 100644 index 000000000..cd87e1b5c --- /dev/null +++ b/vendor/github.com/pkg/sftp/errno_posix.go @@ -0,0 +1,45 @@ +//go:build !plan9 +// +build !plan9 + +package sftp + +import ( + "os" + "syscall" +) + +const EBADF = syscall.EBADF + +func wrapPathError(filepath string, err error) error { + if errno, ok := err.(syscall.Errno); ok { + return &os.PathError{Path: filepath, Err: errno} + } + return err +} + +// translateErrno translates a syscall error number to a SFTP error code. 
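// The conn.go change above threads a context into sendPacket and races it
// against the response channel. The same shape in isolation; note the
// buffered channel, mirroring the cap(ch) < 1 guard, so a response that
// arrives after the caller gave up never blocks its sender:

package main

import (
	"context"
	"fmt"
	"time"
)

type result struct {
	typ  byte
	data []byte
	err  error
}

func await(ctx context.Context, ch chan result) (byte, []byte, error) {
	select {
	case <-ctx.Done():
		// the request is still in flight server-side; we only stop waiting
		return 0, nil, ctx.Err()
	case s := <-ch:
		return s.typ, s.data, s.err
	}
}

func main() {
	ch := make(chan result, 1)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	_, _, err := await(ctx, ch) // nothing ever responds
	fmt.Println(err)            // context deadline exceeded
}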
+func translateErrno(errno syscall.Errno) uint32 { + switch errno { + case 0: + return sshFxOk + case syscall.ENOENT: + return sshFxNoSuchFile + case syscall.EACCES, syscall.EPERM: + return sshFxPermissionDenied + } + + return sshFxFailure +} + +func translateSyscallError(err error) (uint32, bool) { + switch e := err.(type) { + case syscall.Errno: + return translateErrno(e), true + case *os.PathError: + debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) + if errno, ok := e.Err.(syscall.Errno); ok { + return translateErrno(errno), true + } + } + return 0, false +} diff --git a/vendor/github.com/pkg/sftp/ls_unix.go b/vendor/github.com/pkg/sftp/ls_unix.go index 0beba32b2..aa230dc28 100644 --- a/vendor/github.com/pkg/sftp/ls_unix.go +++ b/vendor/github.com/pkg/sftp/ls_unix.go @@ -1,5 +1,5 @@ -//go:build aix || darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || js -// +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris js +//go:build aix || darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || js || zos +// +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris js zos package sftp diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go index 1232ff1ef..bfe6a3c97 100644 --- a/vendor/github.com/pkg/sftp/packet.go +++ b/vendor/github.com/pkg/sftp/packet.go @@ -56,6 +56,11 @@ func marshalFileInfo(b []byte, fi os.FileInfo) []byte { flags, fileStat := fileStatFromInfo(fi) b = marshalUint32(b, flags) + + return marshalFileStat(b, flags, fileStat) +} + +func marshalFileStat(b []byte, flags uint32, fileStat *FileStat) []byte { if flags&sshFileXferAttrSize != 0 { b = marshalUint64(b, fileStat.Size) } @@ -91,10 +96,9 @@ func marshalStatus(b []byte, err StatusError) []byte { } func marshal(b []byte, v interface{}) []byte { - if v == nil { - return b - } switch v := v.(type) { + case nil: + return b case uint8: return append(b, v) case uint32: @@ -103,6 +107,8 @@ func marshal(b []byte, v interface{}) []byte { return marshalUint64(b, v) case string: return marshalString(b, v) + case []byte: + return append(b, v...) 
case os.FileInfo: return marshalFileInfo(b, v) default: @@ -168,38 +174,69 @@ func unmarshalStringSafe(b []byte) (string, []byte, error) { return string(b[:n]), b[n:], nil } -func unmarshalAttrs(b []byte) (*FileStat, []byte) { - flags, b := unmarshalUint32(b) +func unmarshalAttrs(b []byte) (*FileStat, []byte, error) { + flags, b, err := unmarshalUint32Safe(b) + if err != nil { + return nil, b, err + } return unmarshalFileStat(flags, b) } -func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte) { +func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte, error) { var fs FileStat + var err error + if flags&sshFileXferAttrSize == sshFileXferAttrSize { - fs.Size, b, _ = unmarshalUint64Safe(b) - } - if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { - fs.UID, b, _ = unmarshalUint32Safe(b) + fs.Size, b, err = unmarshalUint64Safe(b) + if err != nil { + return nil, b, err + } } if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { - fs.GID, b, _ = unmarshalUint32Safe(b) + fs.UID, b, err = unmarshalUint32Safe(b) + if err != nil { + return nil, b, err + } + fs.GID, b, err = unmarshalUint32Safe(b) + if err != nil { + return nil, b, err + } } if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions { - fs.Mode, b, _ = unmarshalUint32Safe(b) + fs.Mode, b, err = unmarshalUint32Safe(b) + if err != nil { + return nil, b, err + } } if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime { - fs.Atime, b, _ = unmarshalUint32Safe(b) - fs.Mtime, b, _ = unmarshalUint32Safe(b) + fs.Atime, b, err = unmarshalUint32Safe(b) + if err != nil { + return nil, b, err + } + fs.Mtime, b, err = unmarshalUint32Safe(b) + if err != nil { + return nil, b, err + } } if flags&sshFileXferAttrExtended == sshFileXferAttrExtended { var count uint32 - count, b, _ = unmarshalUint32Safe(b) + count, b, err = unmarshalUint32Safe(b) + if err != nil { + return nil, b, err + } + ext := make([]StatExtended, count) for i := uint32(0); i < count; i++ { var typ string var data string - typ, b, _ = unmarshalStringSafe(b) - data, b, _ = unmarshalStringSafe(b) + typ, b, err = unmarshalStringSafe(b) + if err != nil { + return nil, b, err + } + data, b, err = unmarshalStringSafe(b) + if err != nil { + return nil, b, err + } ext[i] = StatExtended{ ExtType: typ, ExtData: data, @@ -207,7 +244,7 @@ func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte) { } fs.Extended = ext } - return &fs, b + return &fs, b, nil } func unmarshalStatus(id uint32, data []byte) error { @@ -681,12 +718,13 @@ type sshFxpOpenPacket struct { ID uint32 Path string Pflags uint32 - Flags uint32 // ignored + Flags uint32 + Attrs interface{} } func (p *sshFxpOpenPacket) id() uint32 { return p.ID } -func (p *sshFxpOpenPacket) MarshalBinary() ([]byte, error) { +func (p *sshFxpOpenPacket) marshalPacket() ([]byte, []byte, error) { l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) 4 + len(p.Path) + 4 + 4 @@ -698,7 +736,22 @@ func (p *sshFxpOpenPacket) MarshalBinary() ([]byte, error) { b = marshalUint32(b, p.Pflags) b = marshalUint32(b, p.Flags) - return b, nil + switch attrs := p.Attrs.(type) { + case []byte: + return b, attrs, nil // may as well short-ciruit this case. + case os.FileInfo: + _, fs := fileStatFromInfo(attrs) // we throw away the flags, and override with those in packet. 
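// unmarshalAttrs/unmarshalFileStat above now propagate errors from every
// *Safe helper instead of discarding them, so a truncated attrs blob is
// reported rather than silently decoded as zeroes. The helpers share a
// length-check-then-slice shape; a sketch of the uint32 one, with the
// short-read sentinel name assumed:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var errShortPacket = errors.New("packet too short") // assumed sentinel

func unmarshalUint32Safe(b []byte) (uint32, []byte, error) {
	if len(b) < 4 {
		return 0, nil, errShortPacket
	}
	return binary.BigEndian.Uint32(b), b[4:], nil
}

func main() {
	v, rest, err := unmarshalUint32Safe([]byte{0, 0, 0, 7, 42})
	fmt.Println(v, rest, err) // 7 [42] <nil>

	_, _, err = unmarshalUint32Safe([]byte{0, 0}) // truncated input
	fmt.Println(err)                              // packet too short
}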
+ return b, marshalFileStat(nil, p.Flags, fs), nil + case *FileStat: + return b, marshalFileStat(nil, p.Flags, attrs), nil + } + + return b, marshal(nil, p.Attrs), nil +} + +func (p *sshFxpOpenPacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err } func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { @@ -709,12 +762,25 @@ func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { return err } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { return err - } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { return err } + p.Attrs = b return nil } +func (p *sshFxpOpenPacket) unmarshalFileStat(flags uint32) (*FileStat, error) { + switch attrs := p.Attrs.(type) { + case *FileStat: + return attrs, nil + case []byte: + fs, _, err := unmarshalFileStat(flags, attrs) + return fs, err + default: + return nil, fmt.Errorf("invalid type in unmarshalFileStat: %T", attrs) + } +} + type sshFxpReadPacket struct { ID uint32 Len uint32 @@ -757,7 +823,7 @@ func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { // So, we need: uint32(length) + byte(type) + uint32(id) + uint32(data_length) const dataHeaderLen = 4 + 1 + 4 + 4 -func (p *sshFxpReadPacket) getDataSlice(alloc *allocator, orderID uint32) []byte { +func (p *sshFxpReadPacket) getDataSlice(alloc *allocator, orderID uint32, maxTxPacket uint32) []byte { dataLen := p.Len if dataLen > maxTxPacket { dataLen = maxTxPacket @@ -943,9 +1009,17 @@ func (p *sshFxpSetstatPacket) marshalPacket() ([]byte, []byte, error) { b = marshalString(b, p.Path) b = marshalUint32(b, p.Flags) - payload := marshal(nil, p.Attrs) + switch attrs := p.Attrs.(type) { + case []byte: + return b, attrs, nil // may as well short-ciruit this case. + case os.FileInfo: + _, fs := fileStatFromInfo(attrs) // we throw away the flags, and override with those in packet. + return b, marshalFileStat(nil, p.Flags, fs), nil + case *FileStat: + return b, marshalFileStat(nil, p.Flags, attrs), nil + } - return b, payload, nil + return b, marshal(nil, p.Attrs), nil } func (p *sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { @@ -964,9 +1038,17 @@ func (p *sshFxpFsetstatPacket) marshalPacket() ([]byte, []byte, error) { b = marshalString(b, p.Handle) b = marshalUint32(b, p.Flags) - payload := marshal(nil, p.Attrs) + switch attrs := p.Attrs.(type) { + case []byte: + return b, attrs, nil // may as well short-ciruit this case. + case os.FileInfo: + _, fs := fileStatFromInfo(attrs) // we throw away the flags, and override with those in packet. 
+ return b, marshalFileStat(nil, p.Flags, fs), nil + case *FileStat: + return b, marshalFileStat(nil, p.Flags, attrs), nil + } - return b, payload, nil + return b, marshal(nil, p.Attrs), nil } func (p *sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { @@ -987,6 +1069,18 @@ func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { return nil } +func (p *sshFxpSetstatPacket) unmarshalFileStat(flags uint32) (*FileStat, error) { + switch attrs := p.Attrs.(type) { + case *FileStat: + return attrs, nil + case []byte: + fs, _, err := unmarshalFileStat(flags, attrs) + return fs, err + default: + return nil, fmt.Errorf("invalid type in unmarshalFileStat: %T", attrs) + } +} + func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { var err error if p.ID, b, err = unmarshalUint32Safe(b); err != nil { @@ -1000,6 +1094,18 @@ func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { return nil } +func (p *sshFxpFsetstatPacket) unmarshalFileStat(flags uint32) (*FileStat, error) { + switch attrs := p.Attrs.(type) { + case *FileStat: + return attrs, nil + case []byte: + fs, _, err := unmarshalFileStat(flags, attrs) + return fs, err + default: + return nil, fmt.Errorf("invalid type in unmarshalFileStat: %T", attrs) + } +} + type sshFxpHandlePacket struct { ID uint32 Handle string diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go index b5c95b4ad..476c56518 100644 --- a/vendor/github.com/pkg/sftp/request-attrs.go +++ b/vendor/github.com/pkg/sftp/request-attrs.go @@ -3,7 +3,6 @@ package sftp // Methods on the Request object to make working with the Flags bitmasks and // Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write // request and AttrFlags() and Attributes() when working with SetStat requests. -import "os" // FileOpenFlags defines Open and Write Flags. Correlate directly with with os.OpenFile flags // (https://golang.org/pkg/os/#pkg-constants). @@ -50,14 +49,9 @@ func (r *Request) AttrFlags() FileAttrFlags { return newFileAttrFlags(r.Flags) } -// FileMode returns the Mode SFTP file attributes wrapped as os.FileMode -func (a FileStat) FileMode() os.FileMode { - return os.FileMode(a.Mode) -} - // Attributes parses file attributes byte blob and return them in a // FileStat object. func (r *Request) Attributes() *FileStat { - fs, _ := unmarshalFileStat(r.Flags, r.Attrs) + fs, _, _ := unmarshalFileStat(r.Flags, r.Attrs) return fs } diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go index 2090e3162..13e7577e3 100644 --- a/vendor/github.com/pkg/sftp/request-interfaces.go +++ b/vendor/github.com/pkg/sftp/request-interfaces.go @@ -30,7 +30,7 @@ type FileReader interface { // FileWriter should return an io.WriterAt for the filepath. // // The request server code will call Close() on the returned io.WriterAt -// ojbect if an io.Closer type assertion succeeds. +// object if an io.Closer type assertion succeeds. // Note in cases of an error, the error text will be sent to the client. // Note when receiving an Append flag it is important to not open files using // O_APPEND if you plan to use WriteAt, as they conflict. @@ -144,6 +144,8 @@ type NameLookupFileLister interface { // // If a populated entry implements [FileInfoExtendedData], extended attributes will also be returned to the client. // +// The request server code will call Close() on ListerAt if an io.Closer type assertion succeeds. 
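// Per the ListerAt doc change above, a lister that holds resources can
// implement io.Closer and let the request server release it along with the
// directory handle. A sketch:

package example

import (
	"io"
	"os"
)

type dirLister struct {
	entries []os.FileInfo
}

func (l *dirLister) ListAt(f []os.FileInfo, off int64) (int, error) {
	if off >= int64(len(l.entries)) {
		return 0, io.EOF
	}
	n := copy(f, l.entries[off:])
	if off+int64(n) == int64(len(l.entries)) {
		return n, io.EOF
	}
	return n, nil
}

// Close is invoked by the request server when the handle is released,
// thanks to the io.Closer type assertion documented above.
func (l *dirLister) Close() error {
	l.entries = nil
	return nil
}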
+// // Note in cases of an error, the error text will be sent to the client. type ListerAt interface { ListAt([]os.FileInfo, int64) (int, error) diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go index 7a99db648..11047e6b1 100644 --- a/vendor/github.com/pkg/sftp/request-server.go +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -10,7 +10,7 @@ import ( "sync" ) -var maxTxPacket uint32 = 1 << 15 +const defaultMaxTxPacket uint32 = 1 << 15 // Handlers contains the 4 SFTP server request handlers. type Handlers struct { @@ -28,6 +28,7 @@ type RequestServer struct { pktMgr *packetManager startDirectory string + maxTxPacket uint32 mu sync.RWMutex handleCount int @@ -57,6 +58,22 @@ func WithStartDirectory(startDirectory string) RequestServerOption { } } +// WithRSMaxTxPacket sets the maximum size of the payload returned to the client, +// measured in bytes. The default value is 32768 bytes, and this option +// can only be used to increase it. Setting this option to a larger value +// should be safe, because the client decides the size of the requested payload. +// +// The default maximum packet size is 32768 bytes. +func WithRSMaxTxPacket(size uint32) RequestServerOption { + return func(rs *RequestServer) { + if size < defaultMaxTxPacket { + return + } + + rs.maxTxPacket = size + } +} + // NewRequestServer creates/allocates/returns new RequestServer. // Normally there will be one server per user-session. func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServerOption) *RequestServer { @@ -73,6 +90,7 @@ func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServ pktMgr: newPktMgr(svrConn), startDirectory: "/", + maxTxPacket: defaultMaxTxPacket, openRequests: make(map[string]*Request), } @@ -260,7 +278,7 @@ func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedR Method: "Stat", Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), } - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID, rs.maxTxPacket) } case *sshFxpFsetstatPacket: handle := pkt.getHandle() @@ -272,7 +290,7 @@ func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedR Method: "Setstat", Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), } - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID, rs.maxTxPacket) } case *sshFxpExtendedPacketPosixRename: request := &Request{ @@ -280,24 +298,24 @@ func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedR Filepath: cleanPathWithBase(rs.startDirectory, pkt.Oldpath), Target: cleanPathWithBase(rs.startDirectory, pkt.Newpath), } - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID, rs.maxTxPacket) case *sshFxpExtendedPacketStatVFS: request := &Request{ Method: "StatVFS", Filepath: cleanPathWithBase(rs.startDirectory, pkt.Path), } - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID, rs.maxTxPacket) case hasHandle: handle := pkt.getHandle() request, ok := rs.getRequest(handle) if !ok { rpkt = statusFromError(pkt.id(), EBADF) } else { - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID, rs.maxTxPacket) } case hasPath: 
request := requestFromPacket(ctx, pkt, rs.startDirectory) - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID, rs.maxTxPacket) request.close() default: rpkt = statusFromError(pkt.id(), ErrSSHFxOpUnsupported) diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go index 57d788df6..e7c47a9c9 100644 --- a/vendor/github.com/pkg/sftp/request.go +++ b/vendor/github.com/pkg/sftp/request.go @@ -121,6 +121,22 @@ func (s *state) getListerAt() ListerAt { return s.listerAt } +func (s *state) closeListerAt() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + + if s.listerAt != nil { + if c, ok := s.listerAt.(io.Closer); ok { + err = c.Close() + } + s.listerAt = nil + } + + return err +} + // Request contains the data and state for the incoming service request. type Request struct { // Get, Put, Setstat, Stat, Rename, Remove @@ -178,6 +194,7 @@ func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Reques switch p := pkt.(type) { case *sshFxpOpenPacket: request.Flags = p.Pflags + request.Attrs = p.Attrs.([]byte) case *sshFxpSetstatPacket: request.Flags = p.Flags request.Attrs = p.Attrs.([]byte) @@ -229,9 +246,9 @@ func (r *Request) close() error { } }() - rd, wr, rw := r.getAllReaderWriters() + err := r.state.closeListerAt() - var err error + rd, wr, rw := r.getAllReaderWriters() // Close errors on a Writer are far more likely to be the important one. // As they can be information that there was a loss of data. @@ -283,14 +300,14 @@ func (r *Request) transferError(err error) { } // called from worker to handle packet/request -func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { +func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, orderID uint32, maxTxPacket uint32) responsePacket { switch r.Method { case "Get": - return fileget(handlers.FileGet, r, pkt, alloc, orderID) + return fileget(handlers.FileGet, r, pkt, alloc, orderID, maxTxPacket) case "Put": - return fileput(handlers.FilePut, r, pkt, alloc, orderID) + return fileput(handlers.FilePut, r, pkt, alloc, orderID, maxTxPacket) case "Open": - return fileputget(handlers.FilePut, r, pkt, alloc, orderID) + return fileputget(handlers.FilePut, r, pkt, alloc, orderID, maxTxPacket) case "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink", "Remove", "PosixRename", "StatVFS": return filecmd(handlers.FileCmd, r, pkt) case "List": @@ -375,13 +392,13 @@ func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket { } // wrap FileReader handler -func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { +func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32, maxTxPacket uint32) responsePacket { rd := r.getReaderAt() if rd == nil { return statusFromError(pkt.id(), errors.New("unexpected read packet")) } - data, offset, _ := packetData(pkt, alloc, orderID) + data, offset, _ := packetData(pkt, alloc, orderID, maxTxPacket) n, err := rd.ReadAt(data, offset) // only return EOF error if no data left to read @@ -397,20 +414,20 @@ func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orde } // wrap FileWriter handler -func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { +func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32, 
maxTxPacket uint32) responsePacket { wr := r.getWriterAt() if wr == nil { return statusFromError(pkt.id(), errors.New("unexpected write packet")) } - data, offset, _ := packetData(pkt, alloc, orderID) + data, offset, _ := packetData(pkt, alloc, orderID, maxTxPacket) _, err := wr.WriteAt(data, offset) return statusFromError(pkt.id(), err) } // wrap OpenFileWriter handler -func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { +func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32, maxTxPacket uint32) responsePacket { rw := r.getWriterAtReaderAt() if rw == nil { return statusFromError(pkt.id(), errors.New("unexpected write and read packet")) @@ -418,7 +435,7 @@ func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, o switch p := pkt.(type) { case *sshFxpReadPacket: - data, offset := p.getDataSlice(alloc, orderID), int64(p.Offset) + data, offset := p.getDataSlice(alloc, orderID, maxTxPacket), int64(p.Offset) n, err := rw.ReadAt(data, offset) // only return EOF error if no data left to read @@ -444,10 +461,10 @@ func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, o } // file data for additional read/write packets -func packetData(p requestPacket, alloc *allocator, orderID uint32) (data []byte, offset int64, length uint32) { +func packetData(p requestPacket, alloc *allocator, orderID uint32, maxTxPacket uint32) (data []byte, offset int64, length uint32) { switch p := p.(type) { case *sshFxpReadPacket: - return p.getDataSlice(alloc, orderID), int64(p.Offset), p.Len + return p.getDataSlice(alloc, orderID, maxTxPacket), int64(p.Offset), p.Len case *sshFxpWritePacket: return p.Data, int64(p.Offset), p.Length } diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go index 2e419f590..fb474c4f3 100644 --- a/vendor/github.com/pkg/sftp/server.go +++ b/vendor/github.com/pkg/sftp/server.go @@ -34,6 +34,7 @@ type Server struct { openFilesLock sync.RWMutex handleCount int workDir string + maxTxPacket uint32 } func (svr *Server) nextHandle(f *os.File) string { @@ -86,6 +87,7 @@ func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) debugStream: ioutil.Discard, pktMgr: newPktMgr(svrConn), openFiles: make(map[string]*os.File), + maxTxPacket: defaultMaxTxPacket, } for _, o := range options { @@ -139,6 +141,24 @@ func WithServerWorkingDirectory(workDir string) ServerOption { } } +// WithMaxTxPacket sets the maximum size of the payload returned to the client, +// measured in bytes. The default value is 32768 bytes, and this option +// can only be used to increase it. Setting this option to a larger value +// should be safe, because the client decides the size of the requested payload. +// +// The default maximum packet size is 32768 bytes. 
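// Both server flavors gain the same knob: WithMaxTxPacket for Server and
// WithRSMaxTxPacket for RequestServer. Values below the 32768-byte default
// are refused; NewServer returns an error, while the RequestServer option
// silently keeps the default. Usage sketch over an assumed
// io.ReadWriteCloser (typically an ssh.Channel):

package example

import (
	"io"

	"github.com/pkg/sftp"
)

func servePlain(rwc io.ReadWriteCloser) error {
	srv, err := sftp.NewServer(rwc, sftp.WithMaxTxPacket(1<<17))
	if err != nil {
		return err // e.g. a size below the 32768-byte floor
	}
	return srv.Serve()
}

func serveRequests(rwc io.ReadWriteCloser, h sftp.Handlers) error {
	// an undersized value here is ignored rather than rejected
	rs := sftp.NewRequestServer(rwc, h, sftp.WithRSMaxTxPacket(1<<17))
	return rs.Serve()
}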
+func WithMaxTxPacket(size uint32) ServerOption { + return func(s *Server) error { + if size < defaultMaxTxPacket { + return errors.New("size must be greater than or equal to 32768") + } + + s.maxTxPacket = size + + return nil + } +} + type rxPacket struct { pktType fxp pktBytes []byte @@ -287,7 +307,7 @@ func handlePacket(s *Server, p orderedRequest) error { f, ok := s.getHandle(p.Handle) if ok { err = nil - data := p.getDataSlice(s.pktMgr.alloc, orderID) + data := p.getDataSlice(s.pktMgr.alloc, orderID, s.maxTxPacket) n, _err := f.ReadAt(data, int64(p.Offset)) if _err != nil && (_err != io.EOF || n == 0) { err = _err @@ -462,7 +482,18 @@ func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { osFlags |= os.O_EXCL } - f, err := os.OpenFile(svr.toLocalPath(p.Path), osFlags, 0o644) + mode := os.FileMode(0o644) + // Like OpenSSH, we only handle permissions here, and only when the file is being created. + // Otherwise, the permissions are ignored. + if p.Flags&sshFileXferAttrPermissions != 0 { + fs, err := p.unmarshalFileStat(p.Flags) + if err != nil { + return statusFromError(p.ID, err) + } + mode = fs.FileMode() & os.ModePerm + } + + f, err := os.OpenFile(svr.toLocalPath(p.Path), osFlags, mode) if err != nil { return statusFromError(p.ID, err) } @@ -496,44 +527,23 @@ func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket { } func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket { - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error + path := svr.toLocalPath(p.Path) - p.Path = svr.toLocalPath(p.Path) + debug("setstat name %q", path) - debug("setstat name \"%s\"", p.Path) - if (p.Flags & sshFileXferAttrSize) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - err = os.Truncate(p.Path, int64(size)) - } + fs, err := p.unmarshalFileStat(p.Flags) + + if err == nil && (p.Flags&sshFileXferAttrSize) != 0 { + err = os.Truncate(path, int64(fs.Size)) } - if (p.Flags & sshFileXferAttrPermissions) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = os.Chmod(p.Path, os.FileMode(mode)) - } + if err == nil && (p.Flags&sshFileXferAttrPermissions) != 0 { + err = os.Chmod(path, fs.FileMode()) } - if (p.Flags & sshFileXferAttrACmodTime) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(p.Path, atimeT, mtimeT) - } + if err == nil && (p.Flags&sshFileXferAttrUIDGID) != 0 { + err = os.Chown(path, int(fs.UID), int(fs.GID)) } - if (p.Flags & sshFileXferAttrUIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != nil { - } else if gid, _, err = unmarshalUint32Safe(b); err != nil { - } else { - err = os.Chown(p.Path, int(uid), int(gid)) - } + if err == nil && (p.Flags&sshFileXferAttrACmodTime) != 0 { + err = os.Chtimes(path, fs.AccessTime(), fs.ModTime()) } return statusFromError(p.ID, err) @@ -545,41 +555,32 @@ func (p *sshFxpFsetstatPacket) respond(svr *Server) responsePacket { return statusFromError(p.ID, EBADF) } - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error + path := f.Name() - debug("fsetstat name \"%s\"", f.Name()) - if (p.Flags & sshFileXferAttrSize) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - 
err = f.Truncate(int64(size)) - } + debug("fsetstat name %q", path) + + fs, err := p.unmarshalFileStat(p.Flags) + + if err == nil && (p.Flags&sshFileXferAttrSize) != 0 { + err = f.Truncate(int64(fs.Size)) } - if (p.Flags & sshFileXferAttrPermissions) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = f.Chmod(os.FileMode(mode)) - } + if err == nil && (p.Flags&sshFileXferAttrPermissions) != 0 { + err = f.Chmod(fs.FileMode()) } - if (p.Flags & sshFileXferAttrACmodTime) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(f.Name(), atimeT, mtimeT) - } + if err == nil && (p.Flags&sshFileXferAttrUIDGID) != 0 { + err = f.Chown(int(fs.UID), int(fs.GID)) } - if (p.Flags & sshFileXferAttrUIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != nil { - } else if gid, _, err = unmarshalUint32Safe(b); err != nil { - } else { - err = f.Chown(int(uid), int(gid)) + if err == nil && (p.Flags&sshFileXferAttrACmodTime) != 0 { + type chtimer interface { + Chtimes(atime, mtime time.Time) error + } + + switch f := interface{}(f).(type) { + case chtimer: + // future-compatible, for when/if *os.File supports Chtimes. + err = f.Chtimes(fs.AccessTime(), fs.ModTime()) + default: + err = os.Chtimes(path, fs.AccessTime(), fs.ModTime()) } } diff --git a/vendor/github.com/pkg/sftp/stat.go b/vendor/github.com/pkg/sftp/stat.go new file mode 100644 index 000000000..2bb2c1370 --- /dev/null +++ b/vendor/github.com/pkg/sftp/stat.go @@ -0,0 +1,94 @@ +package sftp + +import ( + "os" + + sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer" +) + +// isRegular returns true if the mode describes a regular file. 
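// The fsetstat rewrite above uses an interface-upgrade switch so the code
// will transparently prefer a handle-based Chtimes if *os.File ever grows
// one, falling back to the path-based call that exists today. The pattern
// in isolation:

package main

import (
	"fmt"
	"os"
	"time"
)

type chtimer interface {
	Chtimes(atime, mtime time.Time) error
}

func setTimes(file *os.File, atime, mtime time.Time) error {
	switch f := interface{}(file).(type) {
	case chtimer:
		// future-compatible branch, taken only if *os.File implements it
		return f.Chtimes(atime, mtime)
	default:
		return os.Chtimes(file.Name(), atime, mtime)
	}
}

func main() {
	f, err := os.CreateTemp("", "sketch")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	now := time.Now()
	fmt.Println(setTimes(f, now, now)) // <nil>
}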
+func isRegular(mode uint32) bool { + return sshfx.FileMode(mode)&sshfx.ModeType == sshfx.ModeRegular +} + +// toFileMode converts sftp filemode bits to the os.FileMode specification +func toFileMode(mode uint32) os.FileMode { + var fm = os.FileMode(mode & 0777) + + switch sshfx.FileMode(mode) & sshfx.ModeType { + case sshfx.ModeDevice: + fm |= os.ModeDevice + case sshfx.ModeCharDevice: + fm |= os.ModeDevice | os.ModeCharDevice + case sshfx.ModeDir: + fm |= os.ModeDir + case sshfx.ModeNamedPipe: + fm |= os.ModeNamedPipe + case sshfx.ModeSymlink: + fm |= os.ModeSymlink + case sshfx.ModeRegular: + // nothing to do + case sshfx.ModeSocket: + fm |= os.ModeSocket + } + + if sshfx.FileMode(mode)&sshfx.ModeSetUID != 0 { + fm |= os.ModeSetuid + } + if sshfx.FileMode(mode)&sshfx.ModeSetGID != 0 { + fm |= os.ModeSetgid + } + if sshfx.FileMode(mode)&sshfx.ModeSticky != 0 { + fm |= os.ModeSticky + } + + return fm +} + +// fromFileMode converts from the os.FileMode specification to sftp filemode bits +func fromFileMode(mode os.FileMode) uint32 { + ret := sshfx.FileMode(mode & os.ModePerm) + + switch mode & os.ModeType { + case os.ModeDevice | os.ModeCharDevice: + ret |= sshfx.ModeCharDevice + case os.ModeDevice: + ret |= sshfx.ModeDevice + case os.ModeDir: + ret |= sshfx.ModeDir + case os.ModeNamedPipe: + ret |= sshfx.ModeNamedPipe + case os.ModeSymlink: + ret |= sshfx.ModeSymlink + case 0: + ret |= sshfx.ModeRegular + case os.ModeSocket: + ret |= sshfx.ModeSocket + } + + if mode&os.ModeSetuid != 0 { + ret |= sshfx.ModeSetUID + } + if mode&os.ModeSetgid != 0 { + ret |= sshfx.ModeSetGID + } + if mode&os.ModeSticky != 0 { + ret |= sshfx.ModeSticky + } + + return uint32(ret) +} + +const ( + s_ISUID = uint32(sshfx.ModeSetUID) + s_ISGID = uint32(sshfx.ModeSetGID) + s_ISVTX = uint32(sshfx.ModeSticky) +) + +// S_IFMT is a legacy export, and was brought in to support GOOS environments whose sysconfig.S_IFMT may be different from the value used internally by SFTP standards. +// There should be no reason why you need to import it, or use it, but unexporting it could cause code to break in a way that cannot be readily fixed. +// As such, we continue to export this value as the value used in the SFTP standard. +// +// Deprecated: Remove use of this value, and avoid any future use as well. +// There is no alternative provided, you should never need to access this value. +const S_IFMT = uint32(sshfx.ModeType) diff --git a/vendor/github.com/pkg/sftp/stat_plan9.go b/vendor/github.com/pkg/sftp/stat_plan9.go deleted file mode 100644 index 761abdf56..000000000 --- a/vendor/github.com/pkg/sftp/stat_plan9.go +++ /dev/null @@ -1,103 +0,0 @@ -package sftp - -import ( - "os" - "syscall" -) - -var EBADF = syscall.NewError("fd out of range or not open") - -func wrapPathError(filepath string, err error) error { - if errno, ok := err.(syscall.ErrorString); ok { - return &os.PathError{Path: filepath, Err: errno} - } - return err -} - -// translateErrno translates a syscall error number to a SFTP error code. 
-func translateErrno(errno syscall.ErrorString) uint32 { - switch errno { - case "": - return sshFxOk - case syscall.ENOENT: - return sshFxNoSuchFile - case syscall.EPERM: - return sshFxPermissionDenied - } - - return sshFxFailure -} - -func translateSyscallError(err error) (uint32, bool) { - switch e := err.(type) { - case syscall.ErrorString: - return translateErrno(e), true - case *os.PathError: - debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) - if errno, ok := e.Err.(syscall.ErrorString); ok { - return translateErrno(errno), true - } - } - return 0, false -} - -// isRegular returns true if the mode describes a regular file. -func isRegular(mode uint32) bool { - return mode&S_IFMT == syscall.S_IFREG -} - -// toFileMode converts sftp filemode bits to the os.FileMode specification -func toFileMode(mode uint32) os.FileMode { - var fm = os.FileMode(mode & 0777) - - switch mode & S_IFMT { - case syscall.S_IFBLK: - fm |= os.ModeDevice - case syscall.S_IFCHR: - fm |= os.ModeDevice | os.ModeCharDevice - case syscall.S_IFDIR: - fm |= os.ModeDir - case syscall.S_IFIFO: - fm |= os.ModeNamedPipe - case syscall.S_IFLNK: - fm |= os.ModeSymlink - case syscall.S_IFREG: - // nothing to do - case syscall.S_IFSOCK: - fm |= os.ModeSocket - } - - return fm -} - -// fromFileMode converts from the os.FileMode specification to sftp filemode bits -func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(mode & os.ModePerm) - - switch mode & os.ModeType { - case os.ModeDevice | os.ModeCharDevice: - ret |= syscall.S_IFCHR - case os.ModeDevice: - ret |= syscall.S_IFBLK - case os.ModeDir: - ret |= syscall.S_IFDIR - case os.ModeNamedPipe: - ret |= syscall.S_IFIFO - case os.ModeSymlink: - ret |= syscall.S_IFLNK - case 0: - ret |= syscall.S_IFREG - case os.ModeSocket: - ret |= syscall.S_IFSOCK - } - - return ret -} - -// Plan 9 doesn't have setuid, setgid or sticky, but a Plan 9 client should -// be able to send these bits to a POSIX server. -const ( - s_ISUID = 04000 - s_ISGID = 02000 - s_ISVTX = 01000 -) diff --git a/vendor/github.com/pkg/sftp/stat_posix.go b/vendor/github.com/pkg/sftp/stat_posix.go deleted file mode 100644 index 5b870e23c..000000000 --- a/vendor/github.com/pkg/sftp/stat_posix.go +++ /dev/null @@ -1,124 +0,0 @@ -//go:build !plan9 -// +build !plan9 - -package sftp - -import ( - "os" - "syscall" -) - -const EBADF = syscall.EBADF - -func wrapPathError(filepath string, err error) error { - if errno, ok := err.(syscall.Errno); ok { - return &os.PathError{Path: filepath, Err: errno} - } - return err -} - -// translateErrno translates a syscall error number to a SFTP error code. -func translateErrno(errno syscall.Errno) uint32 { - switch errno { - case 0: - return sshFxOk - case syscall.ENOENT: - return sshFxNoSuchFile - case syscall.EACCES, syscall.EPERM: - return sshFxPermissionDenied - } - - return sshFxFailure -} - -func translateSyscallError(err error) (uint32, bool) { - switch e := err.(type) { - case syscall.Errno: - return translateErrno(e), true - case *os.PathError: - debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) - if errno, ok := e.Err.(syscall.Errno); ok { - return translateErrno(errno), true - } - } - return 0, false -} - -// isRegular returns true if the mode describes a regular file. 
-func isRegular(mode uint32) bool { - return mode&S_IFMT == syscall.S_IFREG -} - -// toFileMode converts sftp filemode bits to the os.FileMode specification -func toFileMode(mode uint32) os.FileMode { - var fm = os.FileMode(mode & 0777) - - switch mode & S_IFMT { - case syscall.S_IFBLK: - fm |= os.ModeDevice - case syscall.S_IFCHR: - fm |= os.ModeDevice | os.ModeCharDevice - case syscall.S_IFDIR: - fm |= os.ModeDir - case syscall.S_IFIFO: - fm |= os.ModeNamedPipe - case syscall.S_IFLNK: - fm |= os.ModeSymlink - case syscall.S_IFREG: - // nothing to do - case syscall.S_IFSOCK: - fm |= os.ModeSocket - } - - if mode&syscall.S_ISUID != 0 { - fm |= os.ModeSetuid - } - if mode&syscall.S_ISGID != 0 { - fm |= os.ModeSetgid - } - if mode&syscall.S_ISVTX != 0 { - fm |= os.ModeSticky - } - - return fm -} - -// fromFileMode converts from the os.FileMode specification to sftp filemode bits -func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(mode & os.ModePerm) - - switch mode & os.ModeType { - case os.ModeDevice | os.ModeCharDevice: - ret |= syscall.S_IFCHR - case os.ModeDevice: - ret |= syscall.S_IFBLK - case os.ModeDir: - ret |= syscall.S_IFDIR - case os.ModeNamedPipe: - ret |= syscall.S_IFIFO - case os.ModeSymlink: - ret |= syscall.S_IFLNK - case 0: - ret |= syscall.S_IFREG - case os.ModeSocket: - ret |= syscall.S_IFSOCK - } - - if mode&os.ModeSetuid != 0 { - ret |= syscall.S_ISUID - } - if mode&os.ModeSetgid != 0 { - ret |= syscall.S_ISGID - } - if mode&os.ModeSticky != 0 { - ret |= syscall.S_ISVTX - } - - return ret -} - -const ( - s_ISUID = syscall.S_ISUID - s_ISGID = syscall.S_ISGID - s_ISVTX = syscall.S_ISVTX -) diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go deleted file mode 100644 index e84430830..000000000 --- a/vendor/github.com/pkg/sftp/syscall_fixed.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build plan9 || windows || (js && wasm) -// +build plan9 windows js,wasm - -// Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of -// 0xf000. None of the the other S_IFxyz values include the "1" (in 0x1f000) -// which prevents them from matching the bitmask. - -package sftp - -const S_IFMT = 0xf000 diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go deleted file mode 100644 index 50052189e..000000000 --- a/vendor/github.com/pkg/sftp/syscall_good.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !plan9 && !windows && (!js || !wasm) -// +build !plan9 -// +build !windows -// +build !js !wasm - -package sftp - -import "syscall" - -const S_IFMT = syscall.S_IFMT diff --git a/vendor/github.com/planetscale/vtprotobuf/LICENSE b/vendor/github.com/planetscale/vtprotobuf/LICENSE new file mode 100644 index 000000000..dc61de846 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2021, PlanetScale Inc. All rights reserved. +Copyright (c) 2013, The GoGo Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go b/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go new file mode 100644 index 000000000..64bde83ed --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go @@ -0,0 +1,122 @@ +// Package protohelpers provides helper functions for encoding and decoding protobuf messages. +// The spec can be found at https://protobuf.dev/programming-guides/encoding/. +package protohelpers + +import ( + "fmt" + "io" + "math/bits" +) + +var ( + // ErrInvalidLength is returned when decoding a negative length. + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + // ErrIntOverflow is returned when decoding a varint representation of an integer that overflows 64 bits. + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + // ErrUnexpectedEndOfGroup is returned when decoding a group end without a corresponding group start. + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) + +// EncodeVarint encodes a uint64 into a varint-encoded byte slice and returns the offset of the encoded value. +// The provided offset is the offset after the last byte of the encoded value. +func EncodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= SizeOfVarint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +// SizeOfVarint returns the size of the varint-encoded value. +func SizeOfVarint(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} + +// SizeOfZigzag returns the size of the zigzag-encoded value. +func SizeOfZigzag(x uint64) (n int) { + return SizeOfVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// Skip the first record of the byte slice and return the offset of the next record. 
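The encode-side helpers above repay a worked example: `SizeOfVarint` computes the encoded length as ceil(bitlen/7) via `(bits.Len64(x|1)+6)/7` (the `|1` forces a minimum length of 1 for zero), and `EncodeVarint` fills the buffer right-to-left from a precomputed end offset. A small round-trip sketch (editorial, using local copies of the two helpers):

```golang
package main

import (
	"fmt"
	"math/bits"
)

// sizeOfVarint mirrors protohelpers.SizeOfVarint: ceil(bitlen/7),
// with x|1 ensuring a minimum length of 1 when x == 0.
func sizeOfVarint(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors protohelpers.EncodeVarint: it writes the 7-bit
// groups backwards so the buffer fills right-to-left, returning the
// offset of the first encoded byte.
func encodeVarint(dst []byte, offset int, v uint64) int {
	offset -= sizeOfVarint(v)
	base := offset
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80) // set continuation bit
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, len(buf), 300)
	fmt.Printf("%x\n", buf[n:])    // ac02: low 7 bits first, then 300>>7 == 2
	fmt.Println(sizeOfVarint(300)) // 2
}
```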
+func Skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go index 38f80d5ae..584aac971 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go +++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go @@ -69,69 +69,69 @@ type Extensions struct { // Deprecated // Triggering event of the Github Workflow. Matches the `event_name` claim of ID // tokens from Github Actions - GithubWorkflowTrigger string // OID 1.3.6.1.4.1.57264.1.2 + GithubWorkflowTrigger string `json:"GithubWorkflowTrigger,omitempty" yaml:"github-workflow-trigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2 // Deprecated // SHA of git commit being built in Github Actions. Matches the `sha` claim of ID // tokens from Github Actions - GithubWorkflowSHA string // OID 1.3.6.1.4.1.57264.1.3 + GithubWorkflowSHA string `json:"GithubWorkflowSHA,omitempty" yaml:"github-workflow-sha,omitempty"` // OID 1.3.6.1.4.1.57264.1.3 // Deprecated // Name of Github Actions Workflow. Matches the `workflow` claim of the ID // tokens from Github Actions - GithubWorkflowName string // OID 1.3.6.1.4.1.57264.1.4 + GithubWorkflowName string `json:"GithubWorkflowName,omitempty" yaml:"github-workflow-name,omitempty"` // OID 1.3.6.1.4.1.57264.1.4 // Deprecated // Repository of the Github Actions Workflow. Matches the `repository` claim of the ID // tokens from Github Actions - GithubWorkflowRepository string // OID 1.3.6.1.4.1.57264.1.5 + GithubWorkflowRepository string `json:"GithubWorkflowRepository,omitempty" yaml:"github-workflow-repository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5 // Deprecated // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens // from Github Actions - GithubWorkflowRef string // 1.3.6.1.4.1.57264.1.6 + GithubWorkflowRef string `json:"GithubWorkflowRef,omitempty" yaml:"github-workflow-ref,omitempty"` // 1.3.6.1.4.1.57264.1.6 // Reference to specific build instructions that are responsible for signing. 
- BuildSignerURI string // 1.3.6.1.4.1.57264.1.9 + BuildSignerURI string `json:"BuildSignerURI,omitempty" yaml:"build-signer-uri,omitempty"` // 1.3.6.1.4.1.57264.1.9 // Immutable reference to the specific version of the build instructions that is responsible for signing. - BuildSignerDigest string // 1.3.6.1.4.1.57264.1.10 + BuildSignerDigest string `json:"BuildSignerDigest,omitempty" yaml:"build-signer-digest,omitempty"` // 1.3.6.1.4.1.57264.1.10 // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. - RunnerEnvironment string // 1.3.6.1.4.1.57264.1.11 + RunnerEnvironment string `json:"RunnerEnvironment,omitempty" yaml:"runner-environment,omitempty"` // 1.3.6.1.4.1.57264.1.11 // Source repository URL that the build was based on. - SourceRepositoryURI string // 1.3.6.1.4.1.57264.1.12 + SourceRepositoryURI string `json:"SourceRepositoryURI,omitempty" yaml:"source-repository-uri,omitempty"` // 1.3.6.1.4.1.57264.1.12 // Immutable reference to a specific version of the source code that the build was based upon. - SourceRepositoryDigest string // 1.3.6.1.4.1.57264.1.13 + SourceRepositoryDigest string `json:"SourceRepositoryDigest,omitempty" yaml:"source-repository-digest,omitempty"` // 1.3.6.1.4.1.57264.1.13 // Source Repository Ref that the build run was based upon. - SourceRepositoryRef string // 1.3.6.1.4.1.57264.1.14 + SourceRepositoryRef string `json:"SourceRepositoryRef,omitempty" yaml:"source-repository-ref,omitempty"` // 1.3.6.1.4.1.57264.1.14 // Immutable identifier for the source repository the workflow was based upon. - SourceRepositoryIdentifier string // 1.3.6.1.4.1.57264.1.15 + SourceRepositoryIdentifier string `json:"SourceRepositoryIdentifier,omitempty" yaml:"source-repository-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 // Source repository owner URL of the owner of the source repository that the build was based on. - SourceRepositoryOwnerURI string // 1.3.6.1.4.1.57264.1.16 + SourceRepositoryOwnerURI string `json:"SourceRepositoryOwnerURI,omitempty" yaml:"source-repository-owner-uri,omitempty"` // 1.3.6.1.4.1.57264.1.16 // Immutable identifier for the owner of the source repository that the workflow was based upon. - SourceRepositoryOwnerIdentifier string // 1.3.6.1.4.1.57264.1.17 + SourceRepositoryOwnerIdentifier string `json:"SourceRepositoryOwnerIdentifier,omitempty" yaml:"source-repository-owner-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 // Build Config URL to the top-level/initiating build instructions. - BuildConfigURI string // 1.3.6.1.4.1.57264.1.18 + BuildConfigURI string `json:"BuildConfigURI,omitempty" yaml:"build-config-uri,omitempty"` // 1.3.6.1.4.1.57264.1.18 // Immutable reference to the specific version of the top-level/initiating build instructions. - BuildConfigDigest string // 1.3.6.1.4.1.57264.1.19 + BuildConfigDigest string `json:"BuildConfigDigest,omitempty" yaml:"build-config-digest,omitempty"` // 1.3.6.1.4.1.57264.1.19 // Event or action that initiated the build. - BuildTrigger string // 1.3.6.1.4.1.57264.1.20 + BuildTrigger string `json:"BuildTrigger,omitempty" yaml:"build-trigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 // Run Invocation URL to uniquely identify the build execution. - RunInvocationURI string // 1.3.6.1.4.1.57264.1.21 + RunInvocationURI string `json:"RunInvocationURI,omitempty" yaml:"run-invocation-uri,omitempty"` // 1.3.6.1.4.1.57264.1.21 // Source repository visibility at the time of signing the certificate. 
- SourceRepositoryVisibilityAtSigning string // 1.3.6.1.4.1.57264.1.22 + SourceRepositoryVisibilityAtSigning string `json:"SourceRepositoryVisibilityAtSigning,omitempty" yaml:"source-repository-visibility-at-signing,omitempty"` // 1.3.6.1.4.1.57264.1.22 } func (e Extensions) Render() ([]pkix.Extension, error) { diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go index a8b2805e6..1e2fa031b 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -20,7 +20,6 @@ import ( "crypto" "crypto/ecdsa" "crypto/ed25519" - "crypto/elliptic" "crypto/rsa" "crypto/sha1" // nolint:gosec "crypto/x509" @@ -104,15 +103,15 @@ func EqualKeys(first, second crypto.PublicKey) error { switch pub := first.(type) { case *rsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "rsa")) + return errors.New(genErrMsg(first, second, "rsa")) } case *ecdsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ecdsa")) + return errors.New(genErrMsg(first, second, "ecdsa")) } case ed25519.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ed25519")) + return errors.New(genErrMsg(first, second, "ed25519")) } default: return errors.New("unsupported key type") @@ -137,47 +136,50 @@ func genErrMsg(first, second crypto.PublicKey, keyType string) string { // ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key. func ValidatePubKey(pub crypto.PublicKey) error { + // goodkey policy enforces: + // * RSA + // * Size of key: 2048 <= size <= 4096, size % 8 = 0 + // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) + // * Small primes check for modulus + // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) + // * Key is easily factored with Fermat's factorization method + // * EC + // * Public key Q is not the identity element (Ø) + // * Public key Q's x and y are within [0, p-1] + // * Public key Q is on the curve + // * Public key Q's order matches the subgroups (nQ = Ø) + allowedKeys := &goodkey.AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + ECDSAP521: true, + } + cfg := &goodkey.Config{ + FermatRounds: 100, + AllowedKeys: allowedKeys, + } + p, err := goodkey.NewPolicy(cfg, nil) + if err != nil { + // Should not occur, only chances to return errors are if fermat rounds + // are <0 or when loading blocked/weak keys from disk (not used here) + return errors.New("unable to initialize key policy") + } + switch pk := pub.(type) { case *rsa.PublicKey: - // goodkey policy enforces: - // * Size of key: 2048 <= size <= 4096, size % 8 = 0 - // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) - // * Small primes check for modulus - // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) - // * Key is easily factored with Fermat's factorization method - p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil) - if err != nil { - // Should not occur, only chances to return errors are if fermat rounds - // are <0 or when loading blocked/weak keys from disk (not used here) - return errors.New("unable to initialize key policy") - } // ctx is unused return p.GoodKey(context.Background(), pub) case *ecdsa.PublicKey: - // Unable to use goodkey policy because P-521 curve 
is not supported - return validateEcdsaKey(pk) + // ctx is unused + return p.GoodKey(context.Background(), pub) case ed25519.PublicKey: return validateEd25519Key(pk) } return errors.New("unsupported public key type") } -// Enforce that the ECDSA key curve is one of: -// * NIST P-256 (secp256r1, prime256v1) -// * NIST P-384 -// * NIST P-521. -// Other EC curves, like secp256k1, are not supported by Go. -func validateEcdsaKey(pub *ecdsa.PublicKey) error { - switch pub.Curve { - case elliptic.P224(): - return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521") - case elliptic.P256(), elliptic.P384(), elliptic.P521(): - return nil - default: - return fmt.Errorf("unexpected ec curve") - } -} - // No validations currently, ED25519 supports only one key size. func validateEd25519Key(_ ed25519.PublicKey) error { return nil diff --git a/vendor/github.com/skeema/knownhosts/CONTRIBUTING.md b/vendor/github.com/skeema/knownhosts/CONTRIBUTING.md new file mode 100644 index 000000000..9624f8276 --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/CONTRIBUTING.md @@ -0,0 +1,36 @@ +# Contributing to skeema/knownhosts + +Thank you for your interest in contributing! This document provides guidelines for submitting pull requests. + +### Link to an issue + +Before starting the pull request process, initial discussion should take place on a GitHub issue first. For bug reports, the issue should track the open bug and confirm it is reproducible. For feature requests, the issue should cover why the feature is necessary. + +In the issue comments, discuss your suggested approach for a fix/implementation, and please wait to get feedback before opening a pull request. + +### Test coverage + +In general, please provide reasonably thorough test coverage. Whenever possible, your PR should aim to match or improve the overall test coverage percentage of the package. You can run tests and check coverage locally using `go test -cover`. We also have CI automation in GitHub Actions which will comment on each pull request with a coverage percentage. + +That said, it is fine to submit an initial draft / work-in-progress PR without coverage, if you are waiting on implementation feedback before writing the tests. + +We intentionally avoid hard-coding SSH keys or known_hosts files into the test logic. Instead, the tests generate new keys and then use them to generate a known_hosts file, which is then cached/reused for that overall test run, in order to keep performance reasonable. + +### Documentation + +Exported types require doc comments. The linter CI step will catch this if missing. + +### Backwards compatibility + +Because this package is imported by [nearly 7000 repos on GitHub](https://github.com/skeema/knownhosts/network/dependents), we must be very strict about backwards compatibility of exported symbols and function signatures. + +Backwards compatibility can be very tricky in some situations. In this case, a maintainer may need to add additional commits to your branch to adjust the approach. Please do not take offense if this occurs; it is sometimes simply faster to implement a refactor on our end directly. When the PR/branch is merged, a merge commit will be used, to ensure your commits appear as-is in the repo history and are still properly credited to you. 
+ +### Avoid rewriting core x/crypto/ssh/knownhosts logic + +skeema/knownhosts is intended to be a relatively thin *wrapper* around x/crypto/ssh/knownhosts, without duplicating or re-implementing the core known_hosts file parsing and host key handling logic. Importers of this package should be confident that it can be used as a nearly-drop-in replacement for x/crypto/ssh/knownhosts without introducing substantial risk, security flaws, parser differentials, or unexpected behavior changes. + +To solve shortcomings in x/crypto/ssh/knownhosts, we try to come up with workarounds that still utilize x/crypto/ssh/knownhosts functionality whenever possible. + +Some bugs in x/crypto/ssh/knownhosts do require re-reading the known_hosts file here to solve, but we make that *optional* by offering separate constructors/types with and without that behavior. + diff --git a/vendor/github.com/skeema/knownhosts/LICENSE b/vendor/github.com/skeema/knownhosts/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE new file mode 100644 index 000000000..a92cb34d6 --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/NOTICE @@ -0,0 +1,13 @@ +Copyright 2024 Skeema LLC and the Skeema Knownhosts authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md new file mode 100644 index 000000000..046bc0edc --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/README.md @@ -0,0 +1,133 @@ +# knownhosts: enhanced Golang SSH known_hosts management + +[![build status](https://img.shields.io/github/actions/workflow/status/skeema/knownhosts/tests.yml?branch=main)](https://github.com/skeema/knownhosts/actions) +[![code coverage](https://img.shields.io/coveralls/skeema/knownhosts.svg)](https://coveralls.io/r/skeema/knownhosts) +[![godoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/skeema/knownhosts) + + +> This repo is brought to you by [Skeema](https://github.com/skeema/skeema), a +> declarative pure-SQL schema management system for MySQL and MariaDB. Our +> premium products include extensive [SSH tunnel](https://www.skeema.io/docs/features/ssh/) +> functionality, which internally makes use of this package. + +Go provides excellent functionality for OpenSSH known_hosts files in its +external package [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). +However, that package is somewhat low-level, making it difficult to implement full known_hosts management similar to OpenSSH's command-line behavior. Additionally, [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) has several known issues in edge cases, some of which have remained open for multiple years. + +Package [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) provides a *thin wrapper* around [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts), adding the following improvements and fixes without duplicating its core logic: + +* Look up known_hosts public keys for any given host +* Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts, providing a solution for [golang/go#29286](https://github.com/golang/go/issues/29286). (This also properly handles cert algorithms for hosts using CA keys when [using the NewDB constructor](#enhancements-requiring-extra-parsing) added in skeema/knownhosts v1.3.0.) +* Properly match wildcard hostname known_hosts entries regardless of port number, providing a solution for [golang/go#52056](https://github.com/golang/go/issues/52056). (Added in v1.3.0; requires [using the NewDB constructor](#enhancements-requiring-extra-parsing)) +* Write new known_hosts entries to an io.Writer +* Properly format/normalize new known_hosts entries containing ipv6 addresses, providing a solution for [golang/go#53463](https://github.com/golang/go/issues/53463) +* Easily determine if an ssh.HostKeyCallback's error corresponds to a host whose key has changed (indicating potential MitM attack) vs a host that just isn't known yet + +## How host key lookup works + +Although [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) doesn't directly expose a way to query its known_host map, we use a subtle trick to do so: invoke the HostKeyCallback with a valid host but a bogus key. The resulting KeyError allows us to determine which public keys are actually present for that host. + +By using this technique, [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) doesn't need to duplicate any of the core known_hosts host-lookup logic from [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). 
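To make the trick concrete, here is a minimal sketch (editorial; the known_hosts path is illustrative) of invoking the callback with a placeholder remote address and a key type no real host uses, then inspecting the resulting `KeyError` to enumerate the stored keys:

```golang
package main

import (
	"errors"
	"fmt"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
	xknownhosts "golang.org/x/crypto/ssh/knownhosts"
)

// bogusKey implements ssh.PublicKey with a type no real host uses,
// guaranteeing a key mismatch (and thus a KeyError) for known hosts.
type bogusKey struct{}

func (bogusKey) Type() string                        { return "bogus-key" }
func (bogusKey) Marshal() []byte                     { return []byte("bogus") }
func (bogusKey) Verify([]byte, *ssh.Signature) error { return errors.New("bogus key") }

func main() {
	cb, err := xknownhosts.New("/home/myuser/.ssh/known_hosts") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	addr := &net.TCPAddr{IP: net.IPv4zero, Port: 22}
	cbErr := cb("example.com:22", addr, bogusKey{})

	var keyErr *xknownhosts.KeyError
	if errors.As(cbErr, &keyErr) {
		// keyErr.Want lists every known_hosts entry for the host:
		// empty means the host is unknown, non-empty reveals its keys.
		for _, k := range keyErr.Want {
			fmt.Println(k.Key.Type(), "at", k.Filename, "line", k.Line)
		}
	}
}
```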
+ +## Populating ssh.ClientConfig.HostKeyAlgorithms based on known_hosts + +Hosts often have multiple public keys, each of a different type (algorithm). This can be [problematic](https://github.com/golang/go/issues/29286) in [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts): if a host's first public key is *not* in known_hosts, but a key of a different type *is*, the HostKeyCallback returns an error. The solution is to populate `ssh.ClientConfig.HostKeyAlgorithms` based on the algorithms of the known_hosts entries for that host, but +[golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) +does not provide an obvious way to do so. + +This package uses its host key lookup trick in order to make ssh.ClientConfig.HostKeyAlgorithms easy to populate: + +```golang +import ( + "golang.org/x/crypto/ssh" + "github.com/skeema/knownhosts" +) + +func sshConfigForHost(hostWithPort string) (*ssh.ClientConfig, error) { + kh, err := knownhosts.NewDB("/home/myuser/.ssh/known_hosts") + if err != nil { + return nil, err + } + config := &ssh.ClientConfig{ + User: "myuser", + Auth: []ssh.AuthMethod{ /* ... */ }, + HostKeyCallback: kh.HostKeyCallback(), + HostKeyAlgorithms: kh.HostKeyAlgorithms(hostWithPort), + } + return config, nil +} +``` + +## Enhancements requiring extra parsing + +Originally, this package did not re-read/re-parse the known_hosts files at all, relying entirely on [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) for all known_hosts file reading and processing. This package only offered a constructor called `New`, returning a host key callback, identical to the call pattern of [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) but with extra methods available on the callback type. + +However, a couple shortcomings in [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) cannot possibly be solved without re-reading the known_hosts file. Therefore, as of v1.3.0 of this package, we now offer an alternative constructor `NewDB`, which does an additional read of the known_hosts file (after the one from [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts)), in order to detect: + +* @cert-authority lines, so that we can correctly return cert key algorithms instead of normal host key algorithms when appropriate +* host pattern wildcards, so that we can match OpenSSH's behavior for non-standard port numbers, unlike how [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) normally treats them + +Aside from *detecting* these special cases, this package otherwise still directly uses [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) for host lookups and all other known_hosts file processing. We do **not** fork or re-implement those core behaviors of [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). + +The performance impact of this extra known_hosts read should be minimal, as the file should typically be in the filesystem cache already from the original read by [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). That said, users who wish to avoid the extra read can stay with the `New` constructor, which intentionally retains its pre-v1.3.0 behavior as-is. 
However, the extra fixes for @cert-authority and host pattern wildcards will not be enabled in that case. + +## Writing new known_hosts entries + +If you wish to mimic the behavior of OpenSSH's `StrictHostKeyChecking=no` or `StrictHostKeyChecking=ask`, this package provides a few functions to simplify this task. For example: + +```golang +sshHost := "yourserver.com:22" +khPath := "/home/myuser/.ssh/known_hosts" +kh, err := knownhosts.NewDB(khPath) +if err != nil { + log.Fatal("Failed to read known_hosts: ", err) +} + +// Create a custom permissive hostkey callback which still errors on hosts +// with changed keys, but allows unknown hosts and adds them to known_hosts +cb := ssh.HostKeyCallback(func(hostname string, remote net.Addr, key ssh.PublicKey) error { + innerCallback := kh.HostKeyCallback() + err := innerCallback(hostname, remote, key) + if knownhosts.IsHostKeyChanged(err) { + return fmt.Errorf("REMOTE HOST IDENTIFICATION HAS CHANGED for host %s! This may indicate a MitM attack.", hostname) + } else if knownhosts.IsHostUnknown(err) { + f, ferr := os.OpenFile(khPath, os.O_APPEND|os.O_WRONLY, 0600) + if ferr == nil { + defer f.Close() + ferr = knownhosts.WriteKnownHost(f, hostname, remote, key) + } + if ferr == nil { + log.Printf("Added host %s to known_hosts\n", hostname) + } else { + log.Printf("Failed to add host %s to known_hosts: %v\n", hostname, ferr) + } + return nil // permit previously-unknown hosts (warning: may be insecure) + } + return err +}) + +config := &ssh.ClientConfig{ + User: "myuser", + Auth: []ssh.AuthMethod{ /* ... */ }, + HostKeyCallback: cb, + HostKeyAlgorithms: kh.HostKeyAlgorithms(sshHost), +} +``` + +## License + +**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors** + +```text +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go new file mode 100644 index 000000000..2b7536e0d --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/knownhosts.go @@ -0,0 +1,447 @@ +// Package knownhosts is a thin wrapper around golang.org/x/crypto/ssh/knownhosts, +// adding the ability to obtain the list of host key algorithms for a known host. +package knownhosts + +import ( + "bufio" + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "os" + "sort" + "strings" + + "golang.org/x/crypto/ssh" + xknownhosts "golang.org/x/crypto/ssh/knownhosts" +) + +// HostKeyDB wraps logic in golang.org/x/crypto/ssh/knownhosts with additional +// behaviors, such as the ability to perform host key/algorithm lookups from +// known_hosts entries. +type HostKeyDB struct { + callback ssh.HostKeyCallback + isCert map[string]bool // keyed by "filename:line" + isWildcard map[string]bool // keyed by "filename:line" +} + +// NewDB creates a HostKeyDB from the given OpenSSH known_hosts file(s). 
It +// reads and parses the provided files one additional time (beyond logic in +// golang.org/x/crypto/ssh/knownhosts) in order to: +// +// - Handle CA lines properly and return ssh.CertAlgo* values when calling the +// HostKeyAlgorithms method, for use in ssh.ClientConfig.HostKeyAlgorithms +// - Allow * wildcards in hostnames to match on non-standard ports, providing +// a workaround for https://github.com/golang/go/issues/52056 in order to +// align with OpenSSH's wildcard behavior +// +// When supplying multiple files, their order does not matter. +func NewDB(files ...string) (*HostKeyDB, error) { + cb, err := xknownhosts.New(files...) + if err != nil { + return nil, err + } + hkdb := &HostKeyDB{ + callback: cb, + isCert: make(map[string]bool), + isWildcard: make(map[string]bool), + } + + // Re-read each file a single time, looking for @cert-authority lines. The + // logic for reading the file is designed to mimic hostKeyDB.Read from + // golang.org/x/crypto/ssh/knownhosts + for _, filename := range files { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + scanner := bufio.NewScanner(f) + lineNum := 0 + for scanner.Scan() { + lineNum++ + line := scanner.Bytes() + line = bytes.TrimSpace(line) + // Does the line start with "@cert-authority" followed by whitespace? + if len(line) > 15 && bytes.HasPrefix(line, []byte("@cert-authority")) && (line[15] == ' ' || line[15] == '\t') { + mapKey := fmt.Sprintf("%s:%d", filename, lineNum) + hkdb.isCert[mapKey] = true + line = bytes.TrimSpace(line[16:]) + } + // truncate line to just the host pattern field + if i := bytes.IndexAny(line, "\t "); i >= 0 { + line = line[:i] + } + // Does the host pattern contain a * wildcard and no specific port? + if i := bytes.IndexRune(line, '*'); i >= 0 && !bytes.Contains(line[i:], []byte("]:")) { + mapKey := fmt.Sprintf("%s:%d", filename, lineNum) + hkdb.isWildcard[mapKey] = true + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("knownhosts: %s:%d: %w", filename, lineNum, err) + } + } + return hkdb, nil +} + +// HostKeyCallback returns an ssh.HostKeyCallback. This can be used directly in +// ssh.ClientConfig.HostKeyCallback, as shown in the example for NewDB. +// Alternatively, you can wrap it with an outer callback to potentially handle +// appending a new entry to the known_hosts file; see example in WriteKnownHost. +func (hkdb *HostKeyDB) HostKeyCallback() ssh.HostKeyCallback { + // Either NewDB found no wildcard host patterns, or hkdb was created from + // HostKeyCallback.ToDB in which case we didn't scan known_hosts for them: + // return the callback (which came from x/crypto/ssh/knownhosts) as-is + if len(hkdb.isWildcard) == 0 { + return hkdb.callback + } + + // If we scanned for wildcards and found at least one, return a wrapped + // callback with extra behavior: if the host lookup found no matches, and the + // host arg had a non-standard port, re-do the lookup on standard port 22. If + // that second call returns a *xknownhosts.KeyError, filter down any resulting + // Want keys to known wildcard entries. 
+ f := func(hostname string, remote net.Addr, key ssh.PublicKey) error { + callbackErr := hkdb.callback(hostname, remote, key) + if callbackErr == nil || IsHostKeyChanged(callbackErr) { // hostname has known_host entries as-is + return callbackErr + } + justHost, port, splitErr := net.SplitHostPort(hostname) + if splitErr != nil || port == "" || port == "22" { // hostname already using standard port + return callbackErr + } + // If we reach here, the port was non-standard and no known_host entries + // were found for the non-standard port. Try again with standard port. + if tcpAddr, ok := remote.(*net.TCPAddr); ok && tcpAddr.Port != 22 { + remote = &net.TCPAddr{ + IP: tcpAddr.IP, + Port: 22, + Zone: tcpAddr.Zone, + } + } + callbackErr = hkdb.callback(justHost+":22", remote, key) + var keyErr *xknownhosts.KeyError + if errors.As(callbackErr, &keyErr) && len(keyErr.Want) > 0 { + wildcardKeys := make([]xknownhosts.KnownKey, 0, len(keyErr.Want)) + for _, wantKey := range keyErr.Want { + if hkdb.isWildcard[fmt.Sprintf("%s:%d", wantKey.Filename, wantKey.Line)] { + wildcardKeys = append(wildcardKeys, wantKey) + } + } + callbackErr = &xknownhosts.KeyError{ + Want: wildcardKeys, + } + } + return callbackErr + } + return ssh.HostKeyCallback(f) +} + +// PublicKey wraps ssh.PublicKey with an additional field, to identify +// whether the key corresponds to a certificate authority. +type PublicKey struct { + ssh.PublicKey + Cert bool +} + +// HostKeys returns a slice of known host public keys for the supplied host:port +// found in the known_hosts file(s), or an empty slice if the host is not +// already known. For hosts that have multiple known_hosts entries (for +// different key types), the result will be sorted by known_hosts filename and +// line number. +// If hkdb was originally created by calling NewDB, the Cert boolean field of +// each result entry reports whether the key corresponded to a @cert-authority +// line. If hkdb was NOT obtained from NewDB, then Cert will always be false. +func (hkdb *HostKeyDB) HostKeys(hostWithPort string) (keys []PublicKey) { + var keyErr *xknownhosts.KeyError + placeholderAddr := &net.TCPAddr{IP: []byte{0, 0, 0, 0}} + placeholderPubKey := &fakePublicKey{} + var kkeys []xknownhosts.KnownKey + callback := hkdb.HostKeyCallback() + if hkcbErr := callback(hostWithPort, placeholderAddr, placeholderPubKey); errors.As(hkcbErr, &keyErr) { + kkeys = append(kkeys, keyErr.Want...) + knownKeyLess := func(i, j int) bool { + if kkeys[i].Filename < kkeys[j].Filename { + return true + } + return (kkeys[i].Filename == kkeys[j].Filename && kkeys[i].Line < kkeys[j].Line) + } + sort.Slice(kkeys, knownKeyLess) + keys = make([]PublicKey, len(kkeys)) + for n := range kkeys { + keys[n] = PublicKey{ + PublicKey: kkeys[n].Key, + } + if len(hkdb.isCert) > 0 { + keys[n].Cert = hkdb.isCert[fmt.Sprintf("%s:%d", kkeys[n].Filename, kkeys[n].Line)] + } + } + } + return keys +} + +// HostKeyAlgorithms returns a slice of host key algorithms for the supplied +// host:port found in the known_hosts file(s), or an empty slice if the host +// is not already known. The result may be used in ssh.ClientConfig's +// HostKeyAlgorithms field, either as-is or after filtering (if you wish to +// ignore or prefer particular algorithms). For hosts that have multiple +// known_hosts entries (of different key types), the result will be sorted by +// known_hosts filename and line number. 
+// If hkdb was originally created by calling NewDB, any @cert-authority lines +// in the known_hosts file will properly be converted to the corresponding +// ssh.CertAlgo* values. +func (hkdb *HostKeyDB) HostKeyAlgorithms(hostWithPort string) (algos []string) { + // We ensure that algos never contains duplicates. This is done for robustness + // even though currently golang.org/x/crypto/ssh/knownhosts never exposes + // multiple keys of the same type. This way our behavior here is unaffected + // even if https://github.com/golang/go/issues/28870 is implemented, for + // example by https://github.com/golang/crypto/pull/254. + hostKeys := hkdb.HostKeys(hostWithPort) + seen := make(map[string]struct{}, len(hostKeys)) + addAlgo := func(typ string, cert bool) { + if cert { + typ = keyTypeToCertAlgo(typ) + } + if _, already := seen[typ]; !already { + algos = append(algos, typ) + seen[typ] = struct{}{} + } + } + for _, key := range hostKeys { + typ := key.Type() + if typ == ssh.KeyAlgoRSA { + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, + // not public key formats, so they can't appear as a PublicKey.Type. + // The corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + addAlgo(ssh.KeyAlgoRSASHA512, key.Cert) + addAlgo(ssh.KeyAlgoRSASHA256, key.Cert) + } + addAlgo(typ, key.Cert) + } + return algos +} + +func keyTypeToCertAlgo(keyType string) string { + switch keyType { + case ssh.KeyAlgoRSA: + return ssh.CertAlgoRSAv01 + case ssh.KeyAlgoRSASHA256: + return ssh.CertAlgoRSASHA256v01 + case ssh.KeyAlgoRSASHA512: + return ssh.CertAlgoRSASHA512v01 + case ssh.KeyAlgoDSA: + return ssh.CertAlgoDSAv01 + case ssh.KeyAlgoECDSA256: + return ssh.CertAlgoECDSA256v01 + case ssh.KeyAlgoSKECDSA256: + return ssh.CertAlgoSKECDSA256v01 + case ssh.KeyAlgoECDSA384: + return ssh.CertAlgoECDSA384v01 + case ssh.KeyAlgoECDSA521: + return ssh.CertAlgoECDSA521v01 + case ssh.KeyAlgoED25519: + return ssh.CertAlgoED25519v01 + case ssh.KeyAlgoSKED25519: + return ssh.CertAlgoSKED25519v01 + } + return "" +} + +// HostKeyCallback wraps ssh.HostKeyCallback with additional methods to +// perform host key and algorithm lookups from the known_hosts entries. It is +// otherwise identical to ssh.HostKeyCallback, and does not introduce any file- +// parsing behavior beyond what is in golang.org/x/crypto/ssh/knownhosts. +// +// In most situations, use HostKeyDB and its constructor NewDB instead of using +// the HostKeyCallback type. The HostKeyCallback type is only provided for +// backwards compatibility with older versions of this package, as well as for +// very strict situations where any extra known_hosts file-parsing is +// undesirable. +// +// Methods of HostKeyCallback do not provide any special treatment for +// @cert-authority lines, which will (incorrectly) look like normal non-CA host +// keys. Additionally, HostKeyCallback lacks the fix for applying * wildcard +// known_host entries to all ports, like OpenSSH's behavior. +type HostKeyCallback ssh.HostKeyCallback + +// New creates a HostKeyCallback from the given OpenSSH known_hosts file(s). The +// returned value may be used in ssh.ClientConfig.HostKeyCallback by casting it +// to ssh.HostKeyCallback, or using its HostKeyCallback method. Otherwise, it +// operates the same as the New function in golang.org/x/crypto/ssh/knownhosts. +// When supplying multiple files, their order does not matter. +// +// In most situations, you should avoid this function, as the returned value +// lacks several enhanced behaviors. 
See doc comment for HostKeyCallback for +// more information. Instead, most callers should use NewDB to create a +// HostKeyDB, which includes these enhancements. +func New(files ...string) (HostKeyCallback, error) { + cb, err := xknownhosts.New(files...) + return HostKeyCallback(cb), err +} + +// HostKeyCallback simply casts the receiver back to ssh.HostKeyCallback, for +// use in ssh.ClientConfig.HostKeyCallback. +func (hkcb HostKeyCallback) HostKeyCallback() ssh.HostKeyCallback { + return ssh.HostKeyCallback(hkcb) +} + +// ToDB converts the receiver into a HostKeyDB. However, the returned HostKeyDB +// lacks the enhanced behaviors described in the doc comment for NewDB: proper +// CA support, and wildcard matching on nonstandard ports. +// +// It is generally preferable to create a HostKeyDB by using NewDB. The ToDB +// method is only provided for situations in which the calling code needs to +// make the extra NewDB behaviors optional / user-configurable, perhaps for +// reasons of performance or code trust (since NewDB reads the known_host file +// an extra time, which may be undesirable in some strict situations). This way, +// callers can conditionally create a non-enhanced HostKeyDB by using New and +// ToDB. See code example. +func (hkcb HostKeyCallback) ToDB() *HostKeyDB { + // This intentionally leaves the isCert and isWildcard map fields as nil, as + // there is no way to retroactively populate them from just a HostKeyCallback. + // Methods of HostKeyDB will skip any related enhanced behaviors accordingly. + return &HostKeyDB{callback: ssh.HostKeyCallback(hkcb)} +} + +// HostKeys returns a slice of known host public keys for the supplied host:port +// found in the known_hosts file(s), or an empty slice if the host is not +// already known. For hosts that have multiple known_hosts entries (for +// different key types), the result will be sorted by known_hosts filename and +// line number. +// In the returned values, there is no way to distinguish between CA keys +// (known_hosts lines beginning with @cert-authority) and regular keys. To do +// so, see NewDB and HostKeyDB.HostKeys instead. +func (hkcb HostKeyCallback) HostKeys(hostWithPort string) []ssh.PublicKey { + annotatedKeys := hkcb.ToDB().HostKeys(hostWithPort) + rawKeys := make([]ssh.PublicKey, len(annotatedKeys)) + for n, ak := range annotatedKeys { + rawKeys[n] = ak.PublicKey + } + return rawKeys +} + +// HostKeyAlgorithms returns a slice of host key algorithms for the supplied +// host:port found in the known_hosts file(s), or an empty slice if the host +// is not already known. The result may be used in ssh.ClientConfig's +// HostKeyAlgorithms field, either as-is or after filtering (if you wish to +// ignore or prefer particular algorithms). For hosts that have multiple +// known_hosts entries (for different key types), the result will be sorted by +// known_hosts filename and line number. +// The returned values will not include ssh.CertAlgo* values. If any +// known_hosts lines had @cert-authority prefixes, their original key algo will +// be returned instead. For proper CA support, see NewDB and +// HostKeyDB.HostKeyAlgorithms instead. +func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []string) { + return hkcb.ToDB().HostKeyAlgorithms(hostWithPort) +} + +// HostKeyAlgorithms is a convenience function for performing host key algorithm +// lookups on an ssh.HostKeyCallback directly. 
It is intended for use in code +// paths that stay with the New method of golang.org/x/crypto/ssh/knownhosts +// rather than this package's New or NewDB methods. +// The returned values will not include ssh.CertAlgo* values. If any +// known_hosts lines had @cert-authority prefixes, their original key algo will +// be returned instead. For proper CA support, see NewDB and +// HostKeyDB.HostKeyAlgorithms instead. +func HostKeyAlgorithms(cb ssh.HostKeyCallback, hostWithPort string) []string { + return HostKeyCallback(cb).HostKeyAlgorithms(hostWithPort) +} + +// IsHostKeyChanged returns a boolean indicating whether the error indicates +// the host key has changed. It is intended to be called on the error returned +// from invoking a host key callback, to check whether an SSH host is known. +func IsHostKeyChanged(err error) bool { + var keyErr *xknownhosts.KeyError + return errors.As(err, &keyErr) && len(keyErr.Want) > 0 +} + +// IsHostUnknown returns a boolean indicating whether the error represents an +// unknown host. It is intended to be called on the error returned from invoking +// a host key callback to check whether an SSH host is known. +func IsHostUnknown(err error) bool { + var keyErr *xknownhosts.KeyError + return errors.As(err, &keyErr) && len(keyErr.Want) == 0 +} + +// Normalize normalizes an address into the form used in known_hosts. This +// implementation includes a fix for https://github.com/golang/go/issues/53463 +// and will omit brackets around ipv6 addresses on standard port 22. +func Normalize(address string) string { + host, port, err := net.SplitHostPort(address) + if err != nil { + host = address + port = "22" + } + entry := host + if port != "22" { + entry = "[" + entry + "]:" + port + } else if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + entry = entry[1 : len(entry)-1] + } + return entry +} + +// Line returns a line to append to the known_hosts files. This implementation +// uses the local patched implementation of Normalize in order to solve +// https://github.com/golang/go/issues/53463. +func Line(addresses []string, key ssh.PublicKey) string { + var trimmed []string + for _, a := range addresses { + trimmed = append(trimmed, Normalize(a)) + } + + return strings.Join([]string{ + strings.Join(trimmed, ","), + key.Type(), + base64.StdEncoding.EncodeToString(key.Marshal()), + }, " ") +} + +// WriteKnownHost writes a known_hosts line to w for the supplied hostname, +// remote, and key. This is useful when writing a custom hostkey callback which +// wraps a callback obtained from this package to provide additional known_hosts +// management functionality. The hostname, remote, and key typically correspond +// to the callback's args. This function does not support writing +// @cert-authority lines. +func WriteKnownHost(w io.Writer, hostname string, remote net.Addr, key ssh.PublicKey) error { + // Always include hostname; only also include remote if it isn't a zero value + // and doesn't normalize to the same string as hostname. 
+ hostnameNormalized := Normalize(hostname) + if strings.ContainsAny(hostnameNormalized, "\t ") { + return fmt.Errorf("knownhosts: hostname '%s' contains spaces", hostnameNormalized) + } + addresses := []string{hostnameNormalized} + remoteStrNormalized := Normalize(remote.String()) + if remoteStrNormalized != "[0.0.0.0]:0" && remoteStrNormalized != hostnameNormalized && + !strings.ContainsAny(remoteStrNormalized, "\t ") { + addresses = append(addresses, remoteStrNormalized) + } + line := Line(addresses, key) + "\n" + _, err := w.Write([]byte(line)) + return err +} + +// WriteKnownHostCA writes a @cert-authority line to w for the supplied host +// name/pattern and key. +func WriteKnownHostCA(w io.Writer, hostPattern string, key ssh.PublicKey) error { + encodedKey := base64.StdEncoding.EncodeToString(key.Marshal()) + _, err := fmt.Fprintf(w, "@cert-authority %s %s %s\n", hostPattern, key.Type(), encodedKey) + return err +} + +// fakePublicKey is used as part of the work-around for +// https://github.com/golang/go/issues/29286 +type fakePublicKey struct{} + +func (fakePublicKey) Type() string { + return "fake-public-key" +} +func (fakePublicKey) Marshal() []byte { + return []byte("fake public key") +} +func (fakePublicKey) Verify(_ []byte, _ *ssh.Signature) error { + return errors.New("Verify called on placeholder key") +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 0eb1e1d16..91dd430c1 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -12,33 +12,47 @@ import ( "errors" "fmt" "io" + "math" "os" "time" "github.com/google/uuid" ) +var errAlignmentOverflow = errors.New("integer overflow when calculating alignment") + // nextAligned finds the next offset that satisfies alignment. -func nextAligned(offset int64, alignment int) int64 { +func nextAligned(offset int64, alignment int) (int64, error) { align64 := uint64(alignment) offset64 := uint64(offset) - if align64 != 0 && offset64%align64 != 0 { - offset64 = (offset64 & ^(align64 - 1)) + align64 + if align64 <= 0 || offset64%align64 == 0 { + return offset, nil + } + + offset64 += (align64 - offset64%align64) + + if offset64 > math.MaxInt64 { + return 0, errAlignmentOverflow } - return int64(offset64) + //nolint:gosec // Overflow handled above. + return int64(offset64), nil } // writeDataObjectAt writes the data object described by di to ws, using time t, recording details // in d. The object is written at the first position that satisfies the alignment requirements // described by di following offsetUnaligned. 
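The reworked nextAligned (above) rounds an offset up to the next multiple of the alignment and reports overflow instead of silently wrapping. A standalone sketch of the same rounding rule; names mirror the patch and the values in main are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

var errAlignmentOverflow = errors.New("integer overflow when calculating alignment")

// nextAligned mirrors the patched helper: a zero alignment or an already
// aligned offset is returned unchanged; otherwise round up, guarding MaxInt64.
func nextAligned(offset int64, alignment int) (int64, error) {
	align64 := uint64(alignment)
	offset64 := uint64(offset)

	if align64 == 0 || offset64%align64 == 0 {
		return offset, nil
	}

	offset64 += align64 - offset64%align64
	if offset64 > math.MaxInt64 {
		return 0, errAlignmentOverflow
	}
	return int64(offset64), nil
}

func main() {
	fmt.Println(nextAligned(10, 8))                 // 16 <nil>
	fmt.Println(nextAligned(math.MaxInt64-1, 4096)) // 0 integer overflow ...
}
```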
func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll - offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart) + offset, err := nextAligned(offsetUnaligned, di.opts.alignment) if err != nil { return err } + if _, err := ws.Seek(offset, io.SeekStart); err != nil { + return err + } + n, err := io.Copy(ws, di.r) if err != nil { return err @@ -72,6 +86,7 @@ func (f *FileImage) calculatedDataSize() int64 { var ( errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") errPrimaryPartition = errors.New("image already contains a primary partition") + errObjectIDOverflow = errors.New("object ID would overflow") ) // writeDataObject writes the data object described by di to f, using time t, recording details in @@ -81,6 +96,11 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro return errInsufficientCapacity } + // We derive the ID from i, so make sure the ID will not overflow. + if int64(i) >= math.MaxUint32 { + return errObjectIDOverflow + } + // If this is a primary partition, verify there isn't another primary partition, and update the // architecture in the global header. if p, ok := di.opts.md.(partition); ok && p.Parttype == PartPrimSys { @@ -92,7 +112,7 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro } d := &f.rds[i] - d.ID = uint32(i) + 1 + d.ID = uint32(i) + 1 //nolint:gosec // Overflow handled above. f.h.DataSize = f.calculatedDataSize() @@ -213,8 +233,16 @@ func OptCreateWithCloseOnUnload(b bool) CreateOpt { } } +var errDescriptorCapacityNotSupported = errors.New("descriptor capacity not supported") + // createContainer creates a new SIF container file in rw, according to opts. func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { + // The supported number of descriptors is limited by the unsigned 32-bit ID field in each + // rawDescriptor. + if co.descriptorCapacity >= math.MaxUint32 { + return nil, errDescriptorCapacityNotSupported + } + rds := make([]rawDescriptor, co.descriptorCapacity) rdsSize := int64(binary.Size(rds)) diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index af006fc92..6a6b3e018 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -56,6 +56,11 @@ func (tr *Reader) RawBytes() []byte { } +// ExpectedPadding returns the number of bytes of padding expected after the last header returned by Next() +func (tr *Reader) ExpectedPadding() int64 { + return tr.pad +} + // NewReader creates a new Reader reading from r. 
func NewReader(r io.Reader) *Reader { return &Reader{r: r, curr: &regFileReader{r, 0}} diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/iterate.go b/vendor/github.com/vbatts/tar-split/tar/asm/iterate.go new file mode 100644 index 000000000..8a65887cf --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/tar/asm/iterate.go @@ -0,0 +1,57 @@ +package asm + +import ( + "bytes" + "fmt" + "io" + + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/storage" +) + +// IterateHeaders calls handler for each tar header provided by Unpacker +func IterateHeaders(unpacker storage.Unpacker, handler func(hdr *tar.Header) error) error { + // We assume about NewInputTarStream: + // - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions + // - (There is a FileType entry for every tar header, we ignore it) + // - Trailing padding of a file, if any, is included in the next SegmentType entry + // - At the end, there may be SegmentType entries just for the terminating zero blocks. + + var pendingPadding int64 = 0 + for { + tsEntry, err := unpacker.Next() + if err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("reading tar-split entries: %w", err) + } + switch tsEntry.Type { + case storage.SegmentType: + payload := tsEntry.Payload + if int64(len(payload)) < pendingPadding { + return fmt.Errorf("expected %d bytes of padding after previous file, but next SegmentType only has %d bytes", pendingPadding, len(payload)) + } + payload = payload[pendingPadding:] + pendingPadding = 0 + + tr := tar.NewReader(bytes.NewReader(payload)) + hdr, err := tr.Next() + if err != nil { + if err == io.EOF { // Probably the last entry, but let’s let the unpacker drive that. + break + } + return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err) + } + if err := handler(hdr); err != nil { + return err + } + pendingPadding = tr.ExpectedPadding() + + case storage.FileType: + // Nothing + default: + return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type) + } + } +} diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index 2ccd82afd..73753f029 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -19,40 +19,38 @@ type Bar struct { priority int // used by heap frameCh chan *renderFrame operateState chan func(*bState) - done chan struct{} container *Progress bs *bState + bsOk chan struct{} + ctx context.Context cancel func() } type syncTable [2][]chan int -type extenderFunc func([]io.Reader, decor.Statistics) ([]io.Reader, error) +type extenderFunc func(decor.Statistics, ...io.Reader) ([]io.Reader, error) // bState is actual bar's state.
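Stepping back to the tar-split side: the new asm.IterateHeaders walks a recorded tar-split stream header by header without reassembling file payloads, using ExpectedPadding to skip trailing padding. A hedged usage sketch; the stream file name is illustrative and storage.NewJSONUnpacker is assumed to be the module's stock unpacker:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/vbatts/tar-split/archive/tar"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	f, err := os.Open("tar-split.json") // a previously recorded tar-split stream
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	err = asm.IterateHeaders(storage.NewJSONUnpacker(f), func(hdr *tar.Header) error {
		fmt.Println(hdr.Name, hdr.Size) // inspect each header without file contents
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```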
type bState struct { - id int - priority int - reqWidth int - shutdown int - total int64 - current int64 - refill int64 - trimSpace bool - completed bool - aborted bool - triggerComplete bool - rmOnComplete bool - noPop bool - autoRefresh bool - buffers [3]*bytes.Buffer - decorators [2][]decor.Decorator - averageDecorators []decor.AverageDecorator - ewmaDecorators []decor.EwmaDecorator - shutdownListeners []decor.ShutdownListener - filler BarFiller - extender extenderFunc - renderReq chan<- time.Time - waitBar *Bar // key for (*pState).queueBars + id int + priority int + reqWidth int + shutdown int + total int64 + current int64 + refill int64 + trimSpace bool + aborted bool + triggerComplete bool + rmOnComplete bool + noPop bool + autoRefresh bool + buffers [3]*bytes.Buffer + decorators [2][]decor.Decorator + ewmaDecorators []decor.EwmaDecorator + filler BarFiller + extender extenderFunc + renderReq chan<- time.Time + waitBar *Bar // key for (*pState).queueBars } type renderFrame struct { @@ -70,13 +68,14 @@ func newBar(ctx context.Context, container *Progress, bs *bState) *Bar { priority: bs.priority, frameCh: make(chan *renderFrame, 1), operateState: make(chan func(*bState)), - done: make(chan struct{}), + bsOk: make(chan struct{}), container: container, + ctx: ctx, cancel: cancel, } container.bwg.Add(1) - go bar.serve(ctx, bs) + go bar.serve(bs) return bar } @@ -89,11 +88,13 @@ func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser { if r == nil { panic("expected non nil io.Reader") } - result := make(chan bool) + result := make(chan io.ReadCloser) select { - case b.operateState <- func(s *bState) { result <- len(s.ewmaDecorators) != 0 }: - return newProxyReader(r, b, <-result) - case <-b.done: + case b.operateState <- func(s *bState) { + result <- newProxyReader(r, b, len(s.ewmaDecorators) != 0) + }: + return <-result + case <-b.ctx.Done(): return nil } } @@ -105,11 +106,13 @@ func (b *Bar) ProxyWriter(w io.Writer) io.WriteCloser { if w == nil { panic("expected non nil io.Writer") } - result := make(chan bool) + result := make(chan io.WriteCloser) select { - case b.operateState <- func(s *bState) { result <- len(s.ewmaDecorators) != 0 }: - return newProxyWriter(w, b, <-result) - case <-b.done: + case b.operateState <- func(s *bState) { + result <- newProxyWriter(w, b, len(s.ewmaDecorators) != 0) + }: + return <-result + case <-b.ctx.Done(): return nil } } @@ -120,7 +123,7 @@ func (b *Bar) ID() int { select { case b.operateState <- func(s *bState) { result <- s.id }: return <-result - case <-b.done: + case <-b.bsOk: return b.bs.id } } @@ -131,7 +134,7 @@ func (b *Bar) Current() int64 { select { case b.operateState <- func(s *bState) { result <- s.current }: return <-result - case <-b.done: + case <-b.bsOk: return b.bs.current } } @@ -149,26 +152,30 @@ func (b *Bar) SetRefill(amount int64) { s.refill = s.current } }: - case <-b.done: + case <-b.ctx.Done(): } } -// TraverseDecorators traverses all available decorators and calls cb func on each. +// TraverseDecorators traverses available decorators and calls cb func +// on each in a new goroutine. Decorators implementing decor.Wrapper +// interface are unwrapped first. 
func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { - iter := make(chan decor.Decorator) select { case b.operateState <- func(s *bState) { + var wg sync.WaitGroup for _, decorators := range s.decorators { + wg.Add(len(decorators)) for _, d := range decorators { - iter <- d + d := d + go func() { + cb(unwrap(d)) + wg.Done() + }() } } - close(iter) + wg.Wait() }: - for d := range iter { - cb(unwrap(d)) - } - case <-b.done: + case <-b.ctx.Done(): } } @@ -183,13 +190,12 @@ func (b *Bar) EnableTriggerComplete() { } if s.current >= s.total { s.current = s.total - s.completed = true - b.triggerCompletion(s) + s.triggerCompletion(b) } else { s.triggerComplete = true } }: - case <-b.done: + case <-b.ctx.Done(): } } @@ -211,11 +217,10 @@ func (b *Bar) SetTotal(total int64, complete bool) { } if complete { s.current = s.total - s.completed = true - b.triggerCompletion(s) + s.triggerCompletion(b) } }: - case <-b.done: + case <-b.ctx.Done(): } } @@ -229,31 +234,10 @@ func (b *Bar) SetCurrent(current int64) { s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total - s.completed = true - b.triggerCompletion(s) + s.triggerCompletion(b) } }: - case <-b.done: - } -} - -// EwmaSetCurrent sets progress' current to an arbitrary value and updates -// EWMA based decorators by dur of a single iteration. -func (b *Bar) EwmaSetCurrent(current int64, iterDur time.Duration) { - if current < 0 { - return - } - select { - case b.operateState <- func(s *bState) { - s.decoratorEwmaUpdate(current-s.current, iterDur) - s.current = current - if s.triggerComplete && s.current >= s.total { - s.current = s.total - s.completed = true - b.triggerCompletion(s) - } - }: - case <-b.done: + case <-b.ctx.Done(): } } @@ -269,19 +253,15 @@ func (b *Bar) IncrBy(n int) { // IncrInt64 increments progress by amount of n. func (b *Bar) IncrInt64(n int64) { - if n <= 0 { - return - } select { case b.operateState <- func(s *bState) { s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total - s.completed = true - b.triggerCompletion(s) + s.triggerCompletion(b) } }: - case <-b.done: + case <-b.ctx.Done(): } } @@ -300,28 +280,65 @@ func (b *Bar) EwmaIncrBy(n int, iterDur time.Duration) { func (b *Bar) EwmaIncrInt64(n int64, iterDur time.Duration) { select { case b.operateState <- func(s *bState) { - s.decoratorEwmaUpdate(n, iterDur) + var wg sync.WaitGroup + wg.Add(len(s.ewmaDecorators)) + for _, d := range s.ewmaDecorators { + d := d + go func() { + d.EwmaUpdate(n, iterDur) + wg.Done() + }() + } s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total - s.completed = true - b.triggerCompletion(s) + s.triggerCompletion(b) } + wg.Wait() }: - case <-b.done: + case <-b.ctx.Done(): } } -// DecoratorAverageAdjust adjusts all average based decorators. Call -// if you need to adjust start time of all average based decorators -// or after progress resume. -func (b *Bar) DecoratorAverageAdjust(start time.Time) { +// EwmaSetCurrent sets progress' current to an arbitrary value and updates +// EWMA based decorators by dur of a single iteration. 
+func (b *Bar) EwmaSetCurrent(current int64, iterDur time.Duration) { + if current < 0 { + return + } select { - case b.operateState <- func(s *bState) { s.decoratorAverageAdjust(start) }: - case <-b.done: + case b.operateState <- func(s *bState) { + n := current - s.current + var wg sync.WaitGroup + wg.Add(len(s.ewmaDecorators)) + for _, d := range s.ewmaDecorators { + d := d + go func() { + d.EwmaUpdate(n, iterDur) + wg.Done() + }() + } + s.current = current + if s.triggerComplete && s.current >= s.total { + s.current = s.total + s.triggerCompletion(b) + } + wg.Wait() + }: + case <-b.ctx.Done(): } } +// DecoratorAverageAdjust adjusts decorators implementing decor.AverageDecorator interface. +// Call if there is need to set start time after decorators have been constructed. +func (b *Bar) DecoratorAverageAdjust(start time.Time) { + b.TraverseDecorators(func(d decor.Decorator) { + if d, ok := d.(decor.AverageDecorator); ok { + d.AverageAdjust(start) + } + }) +} + // SetPriority changes bar's order among multiple bars. Zero is highest // priority, i.e. bar will be on top. If you don't need to set priority // dynamically, better use BarPriority option. @@ -336,14 +353,14 @@ func (b *Bar) SetPriority(priority int) { func (b *Bar) Abort(drop bool) { select { case b.operateState <- func(s *bState) { - if s.completed || s.aborted { + if s.aborted || s.completed() { return } s.aborted = true s.rmOnComplete = drop - b.triggerCompletion(s) + s.triggerCompletion(b) }: - case <-b.done: + case <-b.ctx.Done(): } } @@ -353,7 +370,7 @@ func (b *Bar) Aborted() bool { select { case b.operateState <- func(s *bState) { result <- s.aborted }: return <-result - case <-b.done: + case <-b.bsOk: return b.bs.aborted } } @@ -362,41 +379,52 @@ func (b *Bar) Aborted() bool { func (b *Bar) Completed() bool { result := make(chan bool) select { - case b.operateState <- func(s *bState) { result <- s.completed }: + case b.operateState <- func(s *bState) { result <- s.completed() }: return <-result - case <-b.done: - return b.bs.completed + case <-b.bsOk: + return b.bs.completed() } } -// IsRunning reports whether the bar is running, i.e. not yet completed -// and not yet aborted. +// IsRunning reports whether the bar is in running state. func (b *Bar) IsRunning() bool { - result := make(chan bool) select { - case b.operateState <- func(s *bState) { result <- !s.completed && !s.aborted }: - return <-result - case <-b.done: + case <-b.ctx.Done(): return false + default: + return true } } // Wait blocks until bar is completed or aborted. 
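On the mpb side, the Bar lifecycle now hangs off ctx (for operations) and bsOk (for final-state reads), but Abort and Wait still compose the same way from the caller's perspective. A minimal sketch with illustrative totals and timings:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
)

func main() {
	p := mpb.New()
	bar := p.New(100, mpb.BarStyle())

	go func() {
		time.Sleep(50 * time.Millisecond)
		bar.Abort(true) // drop=true removes the bar from output
	}()

	bar.Wait() // returns once the bar is completed or aborted
	p.Wait()
}
```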
func (b *Bar) Wait() { - <-b.done + <-b.bsOk } -func (b *Bar) serve(ctx context.Context, bs *bState) { - defer b.container.bwg.Done() +func (b *Bar) serve(bs *bState) { + decoratorsOnShutdown := func(decorators []decor.Decorator) { + for _, d := range decorators { + if d, ok := unwrap(d).(decor.ShutdownListener); ok { + b.container.bwg.Add(1) + go func() { + d.OnShutdown() + b.container.bwg.Done() + }() + } + } + } for { select { case op := <-b.operateState: op(bs) - case <-ctx.Done(): - bs.aborted = !bs.completed - bs.decoratorShutdownNotify() + case <-b.ctx.Done(): + decoratorsOnShutdown(bs.decorators[0]) + decoratorsOnShutdown(bs.decorators[1]) + // bar can be aborted by canceling parent ctx without calling b.Abort + bs.aborted = !bs.completed() b.bs = bs - close(b.done) + close(b.bsOk) + b.container.bwg.Done() return } } @@ -405,7 +433,7 @@ func (b *Bar) serve(ctx context.Context, bs *bState) { func (b *Bar) render(tw int) { fn := func(s *bState) { frame := new(renderFrame) - stat := newStatistics(tw, s) + stat := s.newStatistics(tw) r, err := s.draw(stat) if err != nil { for _, buf := range s.buffers { @@ -415,11 +443,8 @@ func (b *Bar) render(tw int) { b.frameCh <- frame return } - frame.rows = append(frame.rows, r) - if s.extender != nil { - frame.rows, frame.err = s.extender(frame.rows, stat) - } - if s.completed || s.aborted { + frame.rows, frame.err = s.extender(stat, r) + if s.aborted || s.completed() { frame.shutdown = s.shutdown frame.rmOnComplete = s.rmOnComplete frame.noPop = s.noPop @@ -430,22 +455,11 @@ func (b *Bar) render(tw int) { } select { case b.operateState <- fn: - case <-b.done: + case <-b.bsOk: fn(b.bs) } } -func (b *Bar) triggerCompletion(s *bState) { - if s.autoRefresh { - // Technically this call isn't required, but if refresh rate is set to - // one hour for example and bar completes within a few minutes p.Wait() - // will wait for one hour. This call helps to avoid unnecessary waiting. 
- go b.tryEarlyRefresh(s.renderReq) - } else { - b.cancel() - } -} - func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) { var otherRunning int b.container.traverseBars(func(bar *Bar) bool { @@ -459,7 +473,7 @@ func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) { for { select { case renderReq <- time.Now(): - case <-b.done: + case <-b.ctx.Done(): return } } @@ -471,7 +485,7 @@ func (b *Bar) wSyncTable() syncTable { select { case b.operateState <- func(s *bState) { result <- s.wSyncTable() }: return <-result - case <-b.done: + case <-b.bsOk: return b.bs.wSyncTable() } } @@ -551,58 +565,31 @@ func (s *bState) wSyncTable() (table syncTable) { return table } -func (s bState) decoratorEwmaUpdate(n int64, dur time.Duration) { - var wg sync.WaitGroup - for i := 0; i < len(s.ewmaDecorators); i++ { - switch d := s.ewmaDecorators[i]; i { - case len(s.ewmaDecorators) - 1: - d.EwmaUpdate(n, dur) - default: - wg.Add(1) - go func() { - d.EwmaUpdate(n, dur) - wg.Done() - }() +func (s *bState) populateEwmaDecorators(decorators []decor.Decorator) { + for _, d := range decorators { + if d, ok := unwrap(d).(decor.EwmaDecorator); ok { + s.ewmaDecorators = append(s.ewmaDecorators, d) } } - wg.Wait() } -func (s bState) decoratorAverageAdjust(start time.Time) { - var wg sync.WaitGroup - for i := 0; i < len(s.averageDecorators); i++ { - switch d := s.averageDecorators[i]; i { - case len(s.averageDecorators) - 1: - d.AverageAdjust(start) - default: - wg.Add(1) - go func() { - d.AverageAdjust(start) - wg.Done() - }() - } +func (s *bState) triggerCompletion(b *Bar) { + s.triggerComplete = true + if s.autoRefresh { + // Technically this call isn't required, but if refresh rate is set to + // one hour for example and bar completes within a few minutes p.Wait() + // will wait for one hour. This call helps to avoid unnecessary waiting. 
+ go b.tryEarlyRefresh(s.renderReq) + } else { + b.cancel() } - wg.Wait() } -func (s bState) decoratorShutdownNotify() { - var wg sync.WaitGroup - for i := 0; i < len(s.shutdownListeners); i++ { - switch d := s.shutdownListeners[i]; i { - case len(s.shutdownListeners) - 1: - d.OnShutdown() - default: - wg.Add(1) - go func() { - d.OnShutdown() - wg.Done() - }() - } - } - wg.Wait() +func (s bState) completed() bool { + return s.triggerComplete && s.current == s.total } -func newStatistics(tw int, s *bState) decor.Statistics { +func (s bState) newStatistics(tw int) decor.Statistics { return decor.Statistics{ AvailableWidth: tw, RequestedWidth: s.reqWidth, @@ -610,7 +597,7 @@ func newStatistics(tw int, s *bState) decor.Statistics { Total: s.total, Current: s.current, Refill: s.refill, - Completed: s.completed, + Completed: s.completed(), Aborted: s.aborted, } } diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go index 3860d2f65..7a036d987 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go @@ -50,13 +50,13 @@ type flushSection struct { } type bFiller struct { - components [components]component - meta [components]func(io.Writer, []byte) error - flush func(io.Writer, ...flushSection) error - tipOnComplete bool - tip struct { - frames []component - count uint + components [components]component + meta [components]func(io.Writer, []byte) error + flush func(io.Writer, ...flushSection) error + tip struct { + onComplete bool + count uint + frames []component } } @@ -155,8 +155,7 @@ func (s barStyle) Reverse() BarStyleComposer { func (s barStyle) Build() BarFiller { bf := &bFiller{ - meta: s.metaFuncs, - tipOnComplete: s.tipOnComplete, + meta: s.metaFuncs, } bf.components[iLbound] = component{ width: runewidth.StringWidth(s.style[iLbound]), @@ -178,6 +177,7 @@ func (s barStyle) Build() BarFiller { width: runewidth.StringWidth(s.style[iPadding]), bytes: []byte(s.style[iPadding]), } + bf.tip.onComplete = s.tipOnComplete bf.tip.frames = make([]component, len(s.tipFrames)) for i, t := range s.tipFrames { bf.tip.frames[i] = component{ @@ -233,17 +233,17 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { var tip component var refilling, filling, padding []byte var fillCount int - curWidth := int(internal.PercentageRound(stat.Total, stat.Current, int64(width))) + curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width))) if curWidth != 0 { - if !stat.Completed || s.tipOnComplete { + if !stat.Completed || s.tip.onComplete { tip = s.tip.frames[s.tip.count%uint(len(s.tip.frames))] s.tip.count++ fillCount += tip.width } switch refWidth := 0; { case stat.Refill != 0: - refWidth = int(internal.PercentageRound(stat.Total, stat.Refill, int64(width))) + refWidth = int(internal.PercentageRound(stat.Total, stat.Refill, uint(width))) curWidth -= refWidth refWidth += curWidth fallthrough diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_option.go b/vendor/github.com/vbauerster/mpb/v8/bar_option.go index d5aa5b9fa..6247a334c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_option.go @@ -24,6 +24,7 @@ func inspect(decorators []decor.Decorator) (dest []decor.Decorator) { func PrependDecorators(decorators ...decor.Decorator) BarOption { decorators = inspect(decorators) return func(s *bState) { + s.populateEwmaDecorators(decorators) s.decorators[0] = decorators } } @@ -32,6 +33,7 
@@ func PrependDecorators(decorators ...decor.Decorator) BarOption { func AppendDecorators(decorators ...decor.Decorator) BarOption { decorators = inspect(decorators) return func(s *bState) { + s.populateEwmaDecorators(decorators) s.decorators[1] = decorators } } @@ -112,6 +114,9 @@ func BarExtender(filler BarFiller, rev bool) BarOption { if filler == nil { return nil } + if f, ok := filler.(BarFillerFunc); ok && f == nil { + return nil + } fn := makeExtenderFunc(filler, rev) return func(s *bState) { s.extender = fn @@ -120,28 +125,27 @@ func BarExtender(filler BarFiller, rev bool) BarOption { func makeExtenderFunc(filler BarFiller, rev bool) extenderFunc { buf := new(bytes.Buffer) - base := func(rows []io.Reader, stat decor.Statistics) ([]io.Reader, error) { + base := func(stat decor.Statistics, rows ...io.Reader) ([]io.Reader, error) { err := filler.Fill(buf, stat) if err != nil { buf.Reset() return rows, err } for { - b, err := buf.ReadBytes('\n') + line, err := buf.ReadBytes('\n') if err != nil { buf.Reset() break } - rows = append(rows, bytes.NewReader(b)) + rows = append(rows, bytes.NewReader(line)) } return rows, err } - if !rev { return base } - return func(rows []io.Reader, stat decor.Statistics) ([]io.Reader, error) { - rows, err := base(rows, stat) + return func(stat decor.Statistics, rows ...io.Reader) ([]io.Reader, error) { + rows, err := base(stat, rows...) if err != nil { return rows, err } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go index 6cee7d1d4..f3ed7a8e6 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go @@ -17,15 +17,15 @@ func Elapsed(style TimeStyle, wcc ...WC) Decorator { // // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // -// `startTime` start time +// `start` start time // // `wcc` optional WC config -func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator { +func NewElapsed(style TimeStyle, start time.Time, wcc ...WC) Decorator { var msg string producer := chooseTimeProducer(style) fn := func(s Statistics) string { - if !s.Completed { - msg = producer(time.Since(startTime)) + if !s.Completed && !s.Aborted { + msg = producer(time.Since(start)) } return msg } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index cbe657f1f..bf48d3eef 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -33,13 +33,18 @@ func (f TimeNormalizerFunc) Normalize(src time.Duration) time.Duration { // decorator to work correctly you have to measure each iteration's duration // and pass it to one of the (*Bar).EwmaIncr... family methods. func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { + return EwmaNormalizedETA(style, age, nil, wcc...) +} + +// EwmaNormalizedETA same as EwmaETA but with TimeNormalizer option. +func EwmaNormalizedETA(style TimeStyle, age float64, normalizer TimeNormalizer, wcc ...WC) Decorator { var average ewma.MovingAverage if age == 0 { average = ewma.NewMovingAverage() } else { average = ewma.NewMovingAverage(age) } - return MovingAverageETA(style, average, nil, wcc...) + return MovingAverageETA(style, average, normalizer, wcc...) } // MovingAverageETA decorator relies on MovingAverage implementation to calculate its average. 
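Because PrependDecorators and AppendDecorators now collect EWMA decorators via populateEwmaDecorators, the new EwmaNormalizedETA needs no extra registration. A sketch with simulated per-iteration work:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	p := mpb.New()
	bar := p.New(200,
		mpb.BarStyle(),
		mpb.AppendDecorators(
			// Re-normalize the displayed ETA every 10 samples to damp jitter.
			decor.EwmaNormalizedETA(decor.ET_STYLE_GO, 30, decor.FixedIntervalTimeNormalizer(10)),
		),
	)

	for i := 0; i < 200; i++ {
		start := time.Now()
		time.Sleep(5 * time.Millisecond) // simulated work
		bar.EwmaIncrBy(1, time.Since(start))
	}
	p.Wait()
}
```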
@@ -52,6 +57,9 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { // // `wcc` optional WC config func MovingAverageETA(style TimeStyle, average ewma.MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { + if average == nil { + average = NewMedian() + } d := &movingAverageETA{ WC: initWC(wcc...), producer: chooseTimeProducer(style), @@ -105,15 +113,15 @@ func AverageETA(style TimeStyle, wcc ...WC) Decorator { // // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // -// `startTime` start time +// `start` start time // // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // // `wcc` optional WC config -func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator { +func NewAverageETA(style TimeStyle, start time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator { d := &averageETA{ WC: initWC(wcc...), - startTime: startTime, + start: start, normalizer: normalizer, producer: chooseTimeProducer(style), } @@ -122,7 +130,7 @@ func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormaliz type averageETA struct { WC - startTime time.Time + start time.Time normalizer TimeNormalizer producer func(time.Duration) string } @@ -130,7 +138,7 @@ type averageETA struct { func (d *averageETA) Decor(s Statistics) (string, int) { var remaining time.Duration if s.Current != 0 { - durPerItem := float64(time.Since(d.startTime)) / float64(s.Current) + durPerItem := float64(time.Since(d.start)) / float64(s.Current) durPerItem = math.Round(durPerItem) remaining = time.Duration((s.Total - s.Current) * int64(durPerItem)) if d.normalizer != nil { @@ -140,8 +148,8 @@ func (d *averageETA) Decor(s Statistics) (string, int) { return d.Format(d.producer(remaining)) } -func (d *averageETA) AverageAdjust(startTime time.Time) { - d.startTime = startTime +func (d *averageETA) AverageAdjust(start time.Time) { + d.start = start } // MaxTolerateTimeNormalizer returns implementation of TimeNormalizer. @@ -156,7 +164,10 @@ func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer { } normalized -= time.Since(lastCall) lastCall = time.Now() - return normalized + if normalized > 0 { + return normalized + } + return remaining }) } @@ -175,7 +186,10 @@ func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer { count-- normalized -= time.Since(lastCall) lastCall = time.Now() - return normalized + if normalized > 0 { + return normalized + } + return remaining }) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go index 298fc1a16..547117b25 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go @@ -61,7 +61,7 @@ func NewPercentage(format string, wcc ...WC) Decorator { format = "% d" } f := func(s Statistics) string { - p := internal.PercentageRound(s.Total, s.Current, 100) + p := internal.Percentage(uint(s.Total), uint(s.Current), 100) return fmt.Sprintf(format, percentageType(p)) } return Any(f, wcc...) 
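MovingAverageETA now tolerates a nil MovingAverage by falling back to a median window, and both normalizers clamp to the unnormalized remaining time rather than going negative. A sketch leaning on that nil default; timings are illustrative:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	p := mpb.New()
	bar := p.New(100,
		mpb.BarStyle(),
		mpb.AppendDecorators(
			// nil average defaults to NewMedian(); the normalizer keeps the
			// ETA readout from stalling when an iteration overruns.
			decor.MovingAverageETA(decor.ET_STYLE_HHMMSS, nil, decor.MaxTolerateTimeNormalizer(30*time.Second)),
			decor.NewPercentage("% d"),
		),
	)

	for i := 0; i < 100; i++ {
		start := time.Now()
		time.Sleep(10 * time.Millisecond) // simulated work
		bar.EwmaIncrBy(1, time.Since(start))
	}
	p.Wait()
}
```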
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go index 50415fd9e..b643e10fb 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -21,15 +21,15 @@ var ( // // fmt.Printf("%.1f", FmtAsSpeed(SizeB1024(2048))) func FmtAsSpeed(input fmt.Formatter) fmt.Formatter { - return speedFormatter{input} + return &speedFormatter{input} } type speedFormatter struct { fmt.Formatter } -func (self speedFormatter) Format(st fmt.State, verb rune) { - self.Formatter.Format(st, verb) +func (s *speedFormatter) Format(st fmt.State, verb rune) { + s.Formatter.Format(st, verb) _, err := io.WriteString(st, "/s") if err != nil { panic(err) @@ -120,7 +120,7 @@ func AverageSpeed(unit interface{}, format string, wcc ...WC) Decorator { // // `format` printf compatible verb for value, like "%f" or "%d" // -// `startTime` start time +// `start` start time // // `wcc` optional WC config // @@ -130,32 +130,32 @@ func AverageSpeed(unit interface{}, format string, wcc ...WC) Decorator { // unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s" // unit=SizeB1000(0), format="%.1f" output: "1.0MB/s" // unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s" -func NewAverageSpeed(unit interface{}, format string, startTime time.Time, wcc ...WC) Decorator { +func NewAverageSpeed(unit interface{}, format string, start time.Time, wcc ...WC) Decorator { d := &averageSpeed{ - WC: initWC(wcc...), - startTime: startTime, - producer: chooseSpeedProducer(unit, format), + WC: initWC(wcc...), + start: start, + producer: chooseSpeedProducer(unit, format), } return d } type averageSpeed struct { WC - startTime time.Time - producer func(float64) string - msg string + start time.Time + producer func(float64) string + msg string } func (d *averageSpeed) Decor(s Statistics) (string, int) { if !s.Completed { - speed := float64(s.Current) / float64(time.Since(d.startTime)) + speed := float64(s.Current) / float64(time.Since(d.start)) d.msg = d.producer(speed * 1e9) } return d.Format(d.msg) } -func (d *averageSpeed) AverageAdjust(startTime time.Time) { - d.startTime = startTime +func (d *averageSpeed) AverageAdjust(start time.Time) { + d.start = start } func chooseSpeedProducer(unit interface{}, format string) func(float64) string { diff --git a/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go index 988227d37..e25cf9992 100644 --- a/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go @@ -14,9 +14,9 @@ func Percentage(total, current, width uint) float64 { } // PercentageRound same as Percentage but with math.Round. -func PercentageRound(total, current, width int64) float64 { - if total < 0 || current < 0 || width < 0 { +func PercentageRound(total, current int64, width uint) float64 { + if total < 0 || current < 0 { return 0 } - return math.Round(Percentage(uint(total), uint(current), uint(width))) + return math.Round(Percentage(uint(total), uint(current), width)) } diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index 014a0e4a3..5c57eafa9 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -126,6 +126,9 @@ func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { // New creates a bar by calling `Build` method on provided `BarFillerBuilder`. 
func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { + if builder == nil { + return p.MustAdd(total, nil, options...) + } return p.MustAdd(total, builder.Build(), options...) } @@ -147,12 +150,10 @@ func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption) func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Bar, error) { if filler == nil { filler = NopStyle().Build() + } else if f, ok := filler.(BarFillerFunc); ok && f == nil { + filler = NopStyle().Build() } - type result struct { - bar *Bar - bs *bState - } - ch := make(chan result) + ch := make(chan *Bar) select { case p.operateState <- func(ps *pState) { bs := ps.makeBarState(total, filler, options...) @@ -163,22 +164,9 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Ba ps.hm.push(bar, true) } ps.idCount++ - ch <- result{bar, bs} + ch <- bar }: - res := <-ch - bar, bs := res.bar, res.bs - bar.TraverseDecorators(func(d decor.Decorator) { - if d, ok := d.(decor.AverageDecorator); ok { - bs.averageDecorators = append(bs.averageDecorators, d) - } - if d, ok := d.(decor.EwmaDecorator); ok { - bs.ewmaDecorators = append(bs.ewmaDecorators, d) - } - if d, ok := d.(decor.ShutdownListener); ok { - bs.shutdownListeners = append(bs.shutdownListeners, d) - } - }) - return bar, nil + return <-ch, nil case <-p.done: return nil, DoneError } @@ -237,13 +225,12 @@ func (p *Progress) Write(b []byte) (int, error) { // Wait waits for all bars to complete and finally shutdowns container. After // this method has been called, there is no way to reuse `*Progress` instance. func (p *Progress) Wait() { + p.bwg.Wait() + p.Shutdown() // wait for user wg, if any if p.uwg != nil { p.uwg.Wait() } - - p.bwg.Wait() - p.Shutdown() } // Shutdown cancels any running bar immediately and then shutdowns `*Progress` @@ -453,6 +440,9 @@ func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption filler: filler, renderReq: s.renderReq, autoRefresh: s.autoRefresh, + extender: func(_ decor.Statistics, rows ...io.Reader) ([]io.Reader, error) { + return rows, nil + }, } if total > 0 { diff --git a/vendor/github.com/vbauerster/mpb/v8/proxyreader.go b/vendor/github.com/vbauerster/mpb/v8/proxyreader.go index b0e7720b9..8c324f894 100644 --- a/vendor/github.com/vbauerster/mpb/v8/proxyreader.go +++ b/vendor/github.com/vbauerster/mpb/v8/proxyreader.go @@ -69,28 +69,5 @@ func toReadCloser(r io.Reader) io.ReadCloser { if rc, ok := r.(io.ReadCloser); ok { return rc } - return toNopReadCloser(r) -} - -func toNopReadCloser(r io.Reader) io.ReadCloser { - if _, ok := r.(io.WriterTo); ok { - return nopReadCloserWriterTo{r} - } - return nopReadCloser{r} -} - -type nopReadCloser struct { - io.Reader -} - -func (nopReadCloser) Close() error { return nil } - -type nopReadCloserWriterTo struct { - io.Reader -} - -func (nopReadCloserWriterTo) Close() error { return nil } - -func (c nopReadCloserWriterTo) WriteTo(w io.Writer) (int64, error) { - return c.Reader.(io.WriterTo).WriteTo(w) + return io.NopCloser(r) } diff --git a/vendor/github.com/vishvananda/netlink/.gitignore b/vendor/github.com/vishvananda/netlink/.gitignore index 9f11b755a..66f8fb502 100644 --- a/vendor/github.com/vishvananda/netlink/.gitignore +++ b/vendor/github.com/vishvananda/netlink/.gitignore @@ -1 +1,2 @@ .idea/ +.vscode/ diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index 72862ce1f..218ab2379 100644 --- 
a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -74,17 +74,19 @@ func (h *Handle) AddrDel(link Link, addr *Addr) error { } func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error { - base := link.Attrs() - if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { - return fmt.Errorf("label must begin with interface name") - } - h.ensureIndex(base) - family := nl.GetIPFamily(addr.IP) - msg := nl.NewIfAddrmsg(family) - msg.Index = uint32(base.Index) msg.Scope = uint8(addr.Scope) + if link == nil { + msg.Index = uint32(addr.LinkIndex) + } else { + base := link.Attrs() + if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { + return fmt.Errorf("label must begin with interface name") + } + h.ensureIndex(base) + msg.Index = uint32(base.Index) + } mask := addr.Mask if addr.Peer != nil { mask = addr.Peer.Mask @@ -296,23 +298,24 @@ type AddrUpdate struct { // AddrSubscribe takes a chan down which notifications will be sent // when addresses change. Close the 'done' chan to stop subscription. func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil) + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) } // AddrSubscribeAt works like AddrSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil) + return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // AddrSubscribeOptions contains a set of options to use with // AddrSubscribeWithOptions. 
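The subscription options (the struct follows just below) gain a ReceiveBufferForceSize knob, and the buffer size is now applied to the subscription's own socket rather than to pkgHandle. A hedged sketch of a subscriber using it; AddrUpdate's fields are assumed from the package's existing API:

```go
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	ch := make(chan netlink.AddrUpdate)
	done := make(chan struct{})
	defer close(done)

	opts := netlink.AddrSubscribeOptions{
		ListExisting:           true,
		ReceiveBufferSize:      1 << 20,
		ReceiveBufferForceSize: false, // true would use SO_RCVBUFFORCE semantics
	}
	if err := netlink.AddrSubscribeWithOptions(ch, done, opts); err != nil {
		log.Fatal(err)
	}
	for upd := range ch {
		fmt.Printf("link %d addr %s new=%v\n", upd.LinkIndex, upd.LinkAddress.String(), upd.NewAddr)
	}
}
```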
type AddrSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool - ReceiveBufferSize int - ReceiveTimeout *unix.Timeval + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // AddrSubscribeWithOptions work like AddrSubscribe but enable to @@ -323,10 +326,12 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize, options.ReceiveTimeout) + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int, rcvTimeout *unix.Timeval) error { +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvBufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) if err != nil { return err @@ -336,19 +341,18 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c return err } } - + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvBufForce) + if err != nil { + return err + } + } if done != nil { go func() { <-done s.Close() }() } - if rcvbuf != 0 { - err = pkgHandle.SetSocketReceiveBufferSize(rcvbuf, false) - if err != nil { - return err - } - } if listExisting { req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index 6e1224c47..6c340b0ce 100644 --- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -63,7 +63,19 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, 0, pvid, untagged, self, master) +} + +// BridgeVlanAddRange adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanAddRange(link, vid, vidEnd, pvid, untagged, self, master) +} + +// BridgeVlanAddRange adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, vidEnd, pvid, untagged, self, master) } // BridgeVlanDel adds a new vlan filter entry @@ -75,10 +87,22 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, 
master bool) err // BridgeVlanDel deletes a vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, 0, pvid, untagged, self, master) } -func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error { +// BridgeVlanDelRange deletes a vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanDelRange(link, vid, vidEnd, pvid, untagged, self, master) +} + +// BridgeVlanDelRange deletes a vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, vidEnd, pvid, untagged, self, master) +} + +func (h *Handle) bridgeVlanModify(cmd int, link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { base := link.Attrs() h.ensureIndex(base) req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK) @@ -105,7 +129,20 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged if untagged { vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED } - br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + + if vidEnd != 0 { + vlanEndInfo := &nl.BridgeVlanInfo{Vid: vidEnd} + vlanEndInfo.Flags = vlanInfo.Flags + + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_BEGIN + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + + vlanEndInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_END + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanEndInfo.Serialize()) + } else { + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + } + req.AddData(br) _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err diff --git a/vendor/github.com/vishvananda/netlink/chain.go b/vendor/github.com/vishvananda/netlink/chain.go new file mode 100644 index 000000000..1d1c144e9 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain.go @@ -0,0 +1,22 @@ +package netlink + +import ( + "fmt" +) + +// Chain contains the attributes of a Chain +type Chain struct { + Parent uint32 + Chain uint32 +} + +func (c Chain) String() string { + return fmt.Sprintf("{Parent: %d, Chain: %d}", c.Parent, c.Chain) +} + +func NewChain(parent uint32, chain uint32) Chain { + return Chain{ + Parent: parent, + Chain: chain, + } +} diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go new file mode 100644 index 000000000..d9f441613 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain_linux.go @@ -0,0 +1,112 @@ +package netlink + +import ( + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// ChainDel will delete a chain from the system. +func ChainDel(link Link, chain Chain) error { + // Equivalent to: `tc chain del $chain` + return pkgHandle.ChainDel(link, chain) +} + +// ChainDel will delete a chain from the system.
+// Equivalent to: `tc chain del $chain` +func (h *Handle) ChainDel(link Link, chain Chain) error { + return h.chainModify(unix.RTM_DELCHAIN, 0, link, chain) +} + +// ChainAdd will add a chain to the system. +// Equivalent to: `tc chain add` +func ChainAdd(link Link, chain Chain) error { + return pkgHandle.ChainAdd(link, chain) +} + +// ChainAdd will add a chain to the system. +// Equivalent to: `tc chain add` +func (h *Handle) ChainAdd(link Link, chain Chain) error { + return h.chainModify( + unix.RTM_NEWCHAIN, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, + link, + chain) +} + +func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error { + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: chain.Parent, + } + req.AddData(msg) + req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(chain.Chain))) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. +func ChainList(link Link, parent uint32) ([]Chain, error) { + return pkgHandle.ChainList(link, parent) +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. +func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { + req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: parent, + } + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) + if err != nil { + return nil, err + } + + var res []Chain + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + // skip chains from other interfaces + if link != nil && msg.Ifindex != index { + continue + } + + var chain Chain + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_CHAIN: + chain.Chain = native.Uint32(attr.Value) + chain.Parent = parent + } + } + res = append(res, chain) + } + + return res, nil +} diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go index 10ceffed8..e686f6745 100644 --- a/vendor/github.com/vishvananda/netlink/class.go +++ b/vendor/github.com/vishvananda/netlink/class.go @@ -47,6 +47,7 @@ type ClassStatistics struct { Basic *GnetStatsBasic Queue *GnetStatsQueue RateEst *GnetStatsRateEst + BasicHw *GnetStatsBasic // Hardware statistics added in kernel 4.20 } // NewClassStatistics constructs a ClassStatistics struct whose fields are all initialized to 0.
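The new chain API rounds out `tc chain add/list/del`. A sketch against an illustrative interface; HANDLE_ROOT is assumed from the package's existing qdisc constants:

```go
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // interface name is illustrative
	if err != nil {
		log.Fatal(err)
	}

	chain := netlink.NewChain(netlink.HANDLE_ROOT, 10) // tc chain add dev eth0 chain 10
	if err := netlink.ChainAdd(link, chain); err != nil {
		log.Fatal(err)
	}

	chains, err := netlink.ChainList(link, netlink.HANDLE_ROOT)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range chains {
		fmt.Println(c) // e.g. {Parent: 4294967295, Chain: 10}
	}

	if err := netlink.ChainDel(link, chain); err != nil {
		log.Fatal(err)
	}
}
```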
@@ -55,6 +56,7 @@ func NewClassStatistics() *ClassStatistics { Basic: &GnetStatsBasic{}, Queue: &GnetStatsQueue{}, RateEst: &GnetStatsRateEst{}, + BasicHw: &GnetStatsBasic{}, } } diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index 6f542ba4e..a82eb09de 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -388,6 +388,11 @@ func parseTcStats2(data []byte) (*ClassStatistics, error) { return nil, fmt.Errorf("Failed to parse ClassStatistics.RateEst with: %v\n%s", err, hex.Dump(datum.Value)) } + case nl.TCA_STATS_BASIC_HW: + if err := parseGnetStats(datum.Value, stats.BasicHw); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.BasicHw with: %v\n%s", + err, hex.Dump(datum.Value)) + } } } diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index 03ea1b98f..ba022453b 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -55,10 +55,30 @@ func ConntrackTableFlush(table ConntrackTableType) error { return pkgHandle.ConntrackTableFlush(table) } +// ConntrackCreate creates a new conntrack flow in the desired table +// conntrack -I [table] Create a conntrack or expectation +func ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + return pkgHandle.ConntrackCreate(table, family, flow) +} + +// ConntrackUpdate updates an existing conntrack flow in the desired table using the handle +// conntrack -U [table] Update a conntrack +func ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + return pkgHandle.ConntrackUpdate(table, family, flow) +} + // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use [ConntrackDeleteFilter] instead. func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { - return pkgHandle.ConntrackDeleteFilter(table, family, filter) + return pkgHandle.ConntrackDeleteFilters(table, family, filter) +} + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters +// conntrack -D [table] parameters Delete conntrack or expectation +func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + return pkgHandle.ConntrackDeleteFilters(table, family, filters...) 
} // ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed @@ -87,9 +107,51 @@ func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { return err } +// ConntrackCreate creates a new conntrack flow in the desired table using the handle +// conntrack -I [table] Create a conntrack or expectation +func (h *Handle) ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_CREATE) + attr, err := flow.toNlData() + if err != nil { + return err + } + + for _, a := range attr { + req.AddData(a) + } + + _, err = req.Execute(unix.NETLINK_NETFILTER, 0) + return err +} + +// ConntrackUpdate updates an existing conntrack flow in the desired table using the handle +// conntrack -U [table] Update a conntrack +func (h *Handle) ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_REPLACE) + attr, err := flow.toNlData() + if err != nil { + return err + } + + for _, a := range attr { + req.AddData(a) + } + + _, err = req.Execute(unix.NETLINK_NETFILTER, 0) + return err +} + // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use [Handle.ConntrackDeleteFilters] instead. func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { + return h.ConntrackDeleteFilters(table, family, filter) +} + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed +// conntrack -D [table] parameters Delete conntrack or expectation +func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { res, err := h.dumpConntrackTable(table, family) if err != nil { return 0, err @@ -98,12 +160,16 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami var matched uint for _, dataRaw := range res { flow := parseRawData(dataRaw) - if match := filter.MatchConntrackFlow(flow); match { - req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) - // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already - req2.AddRawData(dataRaw[4:]) - req2.Execute(unix.NETLINK_NETFILTER, 0) - matched++ + for _, filter := range filters { + if match := filter.MatchConntrackFlow(flow); match { + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) + // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already + req2.AddRawData(dataRaw[4:]) + req2.Execute(unix.NETLINK_NETFILTER, 0) + matched++ + // flow is already deleted, no need to match on other filters and continue to the next flow. + break + } } } @@ -128,10 +194,44 @@ func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) return req.Execute(unix.NETLINK_NETFILTER, 0) } +// ProtoInfo wraps an L4-protocol structure - roughly corresponds to the +// __nfct_protoinfo union found in libnetfilter_conntrack/include/internal/object.h. +// Currently, only protocol names, and TCP state is supported. 
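ConntrackDeleteFilters dumps the table once and deletes each flow on its first matching filter, so passing several filters costs no extra netlink round trips. A sketch using the package's existing ConntrackFilter type (its AddIP helper is assumed; IPs are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	f1 := &netlink.ConntrackFilter{}
	if err := f1.AddIP(netlink.ConntrackOrigSrcIP, net.ParseIP("10.0.0.1")); err != nil {
		log.Fatal(err)
	}
	f2 := &netlink.ConntrackFilter{}
	if err := f2.AddIP(netlink.ConntrackReplySrcIP, net.ParseIP("10.0.0.2")); err != nil {
		log.Fatal(err)
	}

	// One table dump; each flow is deleted on its first matching filter.
	n, err := netlink.ConntrackDeleteFilters(netlink.ConntrackTable, unix.AF_INET, f1, f2)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted", n, "flows")
}
```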
+type ProtoInfo interface { + Protocol() string +} + +// ProtoInfoTCP corresponds to the `tcp` struct of the __nfct_protoinfo union. +// Only TCP state is currently supported. +type ProtoInfoTCP struct { + State uint8 +} +// Protocol returns "tcp". +func (*ProtoInfoTCP) Protocol() string {return "tcp"} +func (p *ProtoInfoTCP) toNlData() ([]*nl.RtAttr, error) { + ctProtoInfo := nl.NewRtAttr(unix.NLA_F_NESTED | nl.CTA_PROTOINFO, []byte{}) + ctProtoInfoTCP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_PROTOINFO_TCP, []byte{}) + ctProtoInfoTCPState := nl.NewRtAttr(nl.CTA_PROTOINFO_TCP_STATE, nl.Uint8Attr(p.State)) + ctProtoInfoTCP.AddChild(ctProtoInfoTCPState) + ctProtoInfo.AddChild(ctProtoInfoTCP) + + return []*nl.RtAttr{ctProtoInfo}, nil +} + +// ProtoInfoSCTP only supports the protocol name. +type ProtoInfoSCTP struct {} +// Protocol returns "sctp". +func (*ProtoInfoSCTP) Protocol() string {return "sctp"} + +// ProtoInfoDCCP only supports the protocol name. +type ProtoInfoDCCP struct {} +// Protocol returns "dccp". +func (*ProtoInfoDCCP) Protocol() string {return "dccp"} + // The full conntrack flow structure is very complicated and can be found in the file: // http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h // For the time being, the structure below allows to parse and extract the base information of a flow -type ipTuple struct { +type IPTuple struct { Bytes uint64 DstIP net.IP DstPort uint16 @@ -141,28 +241,150 @@ type ipTuple struct { SrcPort uint16 } +// toNlData generates the inner fields of a nested tuple netlink datastructure +// does not generate the "nested"-flagged outer message. +func (t *IPTuple) toNlData(family uint8) ([]*nl.RtAttr, error) { + + var srcIPsFlag, dstIPsFlag int + if family == nl.FAMILY_V4 { + srcIPsFlag = nl.CTA_IP_V4_SRC + dstIPsFlag = nl.CTA_IP_V4_DST + } else if family == nl.FAMILY_V6 { + srcIPsFlag = nl.CTA_IP_V6_SRC + dstIPsFlag = nl.CTA_IP_V6_DST + } else { + return []*nl.RtAttr{}, fmt.Errorf("couldn't generate netlink message for tuple due to unrecognized FamilyType '%d'", family) + } + + ctTupleIP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_IP, nil) + ctTupleIPSrc := nl.NewRtAttr(srcIPsFlag, t.SrcIP) + ctTupleIP.AddChild(ctTupleIPSrc) + ctTupleIPDst := nl.NewRtAttr(dstIPsFlag, t.DstIP) + ctTupleIP.AddChild(ctTupleIPDst) + + ctTupleProto := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_PROTO, nil) + ctTupleProtoNum := nl.NewRtAttr(nl.CTA_PROTO_NUM, []byte{t.Protocol}) + ctTupleProto.AddChild(ctTupleProtoNum) + ctTupleProtoSrcPort := nl.NewRtAttr(nl.CTA_PROTO_SRC_PORT, nl.BEUint16Attr(t.SrcPort)) + ctTupleProto.AddChild(ctTupleProtoSrcPort) + ctTupleProtoDstPort := nl.NewRtAttr(nl.CTA_PROTO_DST_PORT, nl.BEUint16Attr(t.DstPort)) + ctTupleProto.AddChild(ctTupleProtoDstPort, ) + + return []*nl.RtAttr{ctTupleIP, ctTupleProto}, nil +} + type ConntrackFlow struct { FamilyType uint8 - Forward ipTuple - Reverse ipTuple + Forward IPTuple + Reverse IPTuple Mark uint32 + Zone uint16 TimeStart uint64 TimeStop uint64 TimeOut uint32 + Labels []byte + ProtoInfo ProtoInfo } func (s *ConntrackFlow) String() string { // conntrack cmd output: - // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 + // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 labels=0x00000000050012ac4202010000000000 zone=100 // 
start=2019-07-26 01:26:21.557800506 +0000 UTC stop=1970-01-01 00:00:00 +0000 UTC timeout=30(sec) start := time.Unix(0, int64(s.TimeStart)) stop := time.Unix(0, int64(s.TimeStop)) timeout := int32(s.TimeOut) - return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x start=%v stop=%v timeout=%d(sec)", + res := fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x ", nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol, s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, s.Forward.Packets, s.Forward.Bytes, s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes, - s.Mark, start, stop, timeout) + s.Mark) + if len(s.Labels) > 0 { + res += fmt.Sprintf("labels=0x%x ", s.Labels) + } + if s.Zone != 0 { + res += fmt.Sprintf("zone=%d ", s.Zone) + } + res += fmt.Sprintf("start=%v stop=%v timeout=%d(sec)", start, stop, timeout) + return res +} + +// toNlData generates netlink messages representing the flow. +func (s *ConntrackFlow) toNlData() ([]*nl.RtAttr, error) { + var payload []*nl.RtAttr + // The message structure is built as follows: + // <len, NLA_F_NESTED|CTA_TUPLE_ORIG> + // <len, NLA_F_NESTED|CTA_TUPLE_IP> + // <len, CTA_IP_V4_SRC> + // <source IP> + // <len, CTA_IP_V4_DST> + // <destination IP> + // <len, NLA_F_NESTED|CTA_TUPLE_PROTO> + // <len, CTA_PROTO_NUM> + // <uint8 protocol number> + // <len, CTA_PROTO_SRC_PORT> + // <big-endian uint16 source port> + // <len, CTA_PROTO_DST_PORT> + // <big-endian uint16 destination port> + // <len, NLA_F_NESTED|CTA_TUPLE_REPLY> + // (same nested layout as CTA_TUPLE_ORIG) + // <len, CTA_MARK> + // <big-endian uint32 mark> + // <len, CTA_TIMEOUT> + // <big-endian uint32 timeout> + // <len, NLA_F_NESTED|CTA_PROTOINFO> (optional, TCP state only) + + // CTA_TUPLE_ORIG + ctTupleOrig := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_ORIG, nil) + forwardFlowAttrs, err := s.Forward.toNlData(s.FamilyType) + if err != nil { + return nil, fmt.Errorf("couldn't generate netlink data for conntrack forward flow: %w", err) + } + for _, a := range forwardFlowAttrs { + ctTupleOrig.AddChild(a) + } + + // CTA_TUPLE_REPLY + ctTupleReply := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_REPLY, nil) + reverseFlowAttrs, err := s.Reverse.toNlData(s.FamilyType) + if err != nil { + return nil, fmt.Errorf("couldn't generate netlink data for conntrack reverse flow: %w", err) + } + for _, a := range reverseFlowAttrs { + ctTupleReply.AddChild(a) + } + + ctMark := nl.NewRtAttr(nl.CTA_MARK, nl.BEUint32Attr(s.Mark)) + ctTimeout := nl.NewRtAttr(nl.CTA_TIMEOUT, nl.BEUint32Attr(s.TimeOut)) + + payload = append(payload, ctTupleOrig, ctTupleReply, ctMark, ctTimeout) + + if s.ProtoInfo != nil { + switch p := s.ProtoInfo.(type) { + case *ProtoInfoTCP: + attrs, err := p.toNlData() + if err != nil { + return nil, fmt.Errorf("couldn't generate netlink data for conntrack flow's TCP protoinfo: %w", err) + } + payload = append(payload, attrs...) + default: + return nil, errors.New("couldn't generate netlink data for conntrack: field 'ProtoInfo' only supports TCP or nil") + } + } + + return payload, nil } // This method parses the ip tuple structure @@ -172,7 +394,7 @@ func (s *ConntrackFlow) String() string { // // // -func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 { +func parseIpTuple(reader *bytes.Reader, tpl *IPTuple) uint8 { for i := 0; i < 2; i++ { _, t, _, v := parseNfAttrTLV(reader) switch t { @@ -191,7 +413,7 @@ func parseIpTuple(reader *bytes.Reader, tpl *IPTuple) uint8 { tpl.Protocol = uint8(v[0]) } // We only parse TCP & UDP headers. Skip the others.
- if tpl.Protocol != 6 && tpl.Protocol != 17 { + if tpl.Protocol != unix.IPPROTO_TCP && tpl.Protocol != unix.IPPROTO_UDP { // skip the rest bytesRemaining := protoInfoTotalLen - protoInfoBytesRead reader.Seek(int64(bytesRemaining), seekCurrent) @@ -240,9 +462,13 @@ func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) { return isNested, attrType, len } -func skipNfAttrValue(r *bytes.Reader, len uint16) { +// skipNfAttrValue seeks `r` past attr of length `len`. +// Maintains buffer alignment. +// Returns length of the seek performed. +func skipNfAttrValue(r *bytes.Reader, len uint16) uint16 { len = (len + nl.NLA_ALIGNTO - 1) & ^(nl.NLA_ALIGNTO - 1) r.Seek(int64(len), seekCurrent) + return len } func parseBERaw16(r *bytes.Reader, v *uint16) { @@ -257,6 +483,10 @@ func parseBERaw64(r *bytes.Reader, v *uint64) { binary.Read(r, binary.BigEndian, v) } +func parseRaw32(r *bytes.Reader, v *uint32) { + binary.Read(r, nl.NativeEndian(), v) +} + func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) { for i := 0; i < 2; i++ { switch _, t, _ := parseNfAttrTL(r); t { @@ -296,6 +526,60 @@ func parseTimeStamp(r *bytes.Reader, readSize uint16) (tstart, tstop uint64) { } +func parseProtoInfoTCPState(r *bytes.Reader) (s uint8) { + binary.Read(r, binary.BigEndian, &s) + r.Seek(nl.SizeofNfattr - 1, seekCurrent) + return s +} + +// parseProtoInfoTCP reads the entire nested protoinfo structure, but only parses the state attr. +func parseProtoInfoTCP(r *bytes.Reader, attrLen uint16) (*ProtoInfoTCP) { + p := new(ProtoInfoTCP) + bytesRead := 0 + for bytesRead < int(attrLen) { + _, t, l := parseNfAttrTL(r) + bytesRead += nl.SizeofNfattr + + switch t { + case nl.CTA_PROTOINFO_TCP_STATE: + p.State = parseProtoInfoTCPState(r) + bytesRead += nl.SizeofNfattr + default: + bytesRead += int(skipNfAttrValue(r, l)) + } + } + + return p +} + +func parseProtoInfo(r *bytes.Reader, attrLen uint16) (p ProtoInfo) { + bytesRead := 0 + for bytesRead < int(attrLen) { + _, t, l := parseNfAttrTL(r) + bytesRead += nl.SizeofNfattr + + switch t { + case nl.CTA_PROTOINFO_TCP: + p = parseProtoInfoTCP(r, l) + bytesRead += int(l) + // No inner fields of DCCP / SCTP currently supported. 
+ case nl.CTA_PROTOINFO_DCCP: + p = new(ProtoInfoDCCP) + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + case nl.CTA_PROTOINFO_SCTP: + p = new(ProtoInfoSCTP) + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + default: + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + } + } + + return p +} + func parseTimeOut(r *bytes.Reader) (ttimeout uint32) { parseBERaw32(r, &ttimeout) return @@ -306,6 +590,18 @@ func parseConnectionMark(r *bytes.Reader) (mark uint32) { return } +func parseConnectionLabels(r *bytes.Reader) (label []byte) { + label = make([]byte, 16) // netfilter defines 128 bit labels value + binary.Read(r, nl.NativeEndian(), &label) + return +} + +func parseConnectionZone(r *bytes.Reader) (zone uint16) { + parseBERaw16(r, &zone) + r.Seek(2, seekCurrent) + return +} + func parseRawData(data []byte) *ConntrackFlow { s := &ConntrackFlow{} // First there is the Nfgenmsg header @@ -343,7 +639,7 @@ func parseRawData(data []byte) *ConntrackFlow { case nl.CTA_TIMESTAMP: s.TimeStart, s.TimeStop = parseTimeStamp(reader, l) case nl.CTA_PROTOINFO: - skipNfAttrValue(reader, l) + s.ProtoInfo = parseProtoInfo(reader, l) default: skipNfAttrValue(reader, l) } @@ -351,10 +647,14 @@ func parseRawData(data []byte) *ConntrackFlow { switch t { case nl.CTA_MARK: s.Mark = parseConnectionMark(reader) + case nl.CTA_LABELS: + s.Labels = parseConnectionLabels(reader) case nl.CTA_TIMEOUT: s.TimeOut = parseTimeOut(reader) - case nl.CTA_STATUS, nl.CTA_USE, nl.CTA_ID: + case nl.CTA_ID, nl.CTA_STATUS, nl.CTA_USE: skipNfAttrValue(reader, l) + case nl.CTA_ZONE: + s.Zone = parseConnectionZone(reader) default: skipNfAttrValue(reader, l) } @@ -399,16 +699,18 @@ func parseRawData(data []byte) *ConntrackFlow { type ConntrackFilterType uint8 const ( - ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction - ConntrackOrigDstIP // -orig-dst ip Destination address from original direction - ConntrackReplySrcIP // --reply-src ip Reply Source IP - ConntrackReplyDstIP // --reply-dst ip Reply Destination IP - ConntrackReplyAnyIP // Match source or destination reply IP - ConntrackOrigSrcPort // --orig-port-src port Source port in original direction - ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction - ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP - ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP - ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP + ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction + ConntrackOrigDstIP // -orig-dst ip Destination address from original direction + ConntrackReplySrcIP // --reply-src ip Reply Source IP + ConntrackReplyDstIP // --reply-dst ip Reply Destination IP + ConntrackReplyAnyIP // Match source or destination reply IP + ConntrackOrigSrcPort // --orig-port-src port Source port in original direction + ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction + ConntrackMatchLabels // --label label1,label2 Labels used in entry + ConntrackUnmatchLabels // --label label1,label2 Labels not used in entry + ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP + ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP + ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP ) type CustomConntrackFilter interface { @@ -421,6 +723,8 @@ type 
ConntrackFilter struct { ipNetFilter map[ConntrackFilterType]*net.IPNet portFilter map[ConntrackFilterType]uint16 protoFilter uint8 + labelFilter map[ConntrackFilterType][][]byte + zoneFilter *uint16 } // AddIPNet adds an IP subnet to the conntrack filter @@ -474,10 +778,43 @@ func (f *ConntrackFilter) AddProtocol(proto uint8) error { return nil } +// AddLabels adds the provided list (one or more) of labels to the conntrack filter +// ConntrackFilterType here can be either: +// 1. ConntrackMatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` contains ALL the provided labels +// it is considered a match. This can be used when you want to match flows that carry +// all of the given labels. +// 2. ConntrackUnmatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` contains NONE of the provided labels +// it is considered a match. This can be used when you want to match flows that carry +// none of the given labels. +func (f *ConntrackFilter) AddLabels(tp ConntrackFilterType, labels [][]byte) error { + if len(labels) == 0 { + return errors.New("Invalid length for provided labels") + } + if f.labelFilter == nil { + f.labelFilter = make(map[ConntrackFilterType][][]byte) + } + if _, ok := f.labelFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.labelFilter[tp] = labels + return nil +} + +// AddZone adds a zone to the conntrack filter +func (f *ConntrackFilter) AddZone(zone uint16) error { + if f.zoneFilter != nil { + return errors.New("Filter attribute already present") + } + f.zoneFilter = &zone + return nil +} + // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter // false otherwise func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { - if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 { + if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 && len(f.labelFilter) == 0 && f.zoneFilter == nil { // an empty filter never matches return false } @@ -488,6 +825,11 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { return false } + // Conntrack zone filter + if f.zoneFilter != nil && *f.zoneFilter != flow.Zone { + return false + } + match := true // IP conntrack filter @@ -531,6 +873,29 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { } } + // Label filter + if len(f.labelFilter) > 0 { + if len(flow.Labels) > 0 { + // --label label1,label2 in conn entry; + // every label passed should be contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackMatchLabels]; match && found { + for _, label := range elem { + match = match && (bytes.Contains(flow.Labels, label)) + } + } + // --label label1,label2 in conn entry; + // every label passed must not be contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackUnmatchLabels]; match && found { + for _, label := range elem { + match = match && !(bytes.Contains(flow.Labels, label)) + } + } + } else { + // flow doesn't carry any labels, so it can satisfy neither the match nor the unmatch filter + match = false + } + } + return match } diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go index af7af799e..0bfdf422d 100644 ---
a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go @@ -11,6 +11,9 @@ type InetFamily uint8 // ConntrackFlow placeholder type ConntrackFlow struct{} +// CustomConntrackFilter placeholder +type CustomConntrackFilter struct{} + // ConntrackFilter placeholder type ConntrackFilter struct{} @@ -29,10 +32,18 @@ func ConntrackTableFlush(table ConntrackTableType) error { // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use [ConntrackDeleteFilters] instead. func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) { return 0, ErrNotImplemented } +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters +// conntrack -D [table] parameters Delete conntrack or expectation +func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + return 0, ErrNotImplemented +} + // ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed // conntrack -L [table] [options] List conntrack or expectation table func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { @@ -48,6 +59,14 @@ func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use [Handle.ConntrackDeleteFilters] instead. func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) { return 0, ErrNotImplemented } + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed +// conntrack -D [table] parameters Delete conntrack or expectation +func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + return 0, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go index 358b232c6..d98801dbb 100644 --- a/vendor/github.com/vishvananda/netlink/devlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -84,6 +84,270 @@ type DevlinkDeviceInfo struct { FwUndi string } +// DevlinkResource represents a device resource +type DevlinkResource struct { + Name string + ID uint64 + Size uint64 + SizeNew uint64 + SizeMin uint64 + SizeMax uint64 + SizeGranularity uint64 + PendingChange bool + Unit uint8 + SizeValid bool + OCCValid bool + OCCSize uint64 + Parent *DevlinkResource + Children []DevlinkResource +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkResource, returns error if occurred +func (dlr *DevlinkResource) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error { + var attr syscall.NetlinkRouteAttr + var ok bool + + // mandatory attributes + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_ID] + if !ok { + return fmt.Errorf("missing resource id") + } + dlr.ID = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_NAME] + if !ok { + return fmt.Errorf("missing resource name") + } + dlr.Name =
nl.BytesToString(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE] + if !ok { + return fmt.Errorf("missing resource size") + } + dlr.Size = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_GRAN] + if !ok { + return fmt.Errorf("missing resource size granularity") + } + dlr.SizeGranularity = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_UNIT] + if !ok { + return fmt.Errorf("missing resource unit") + } + dlr.Unit = uint8(attr.Value[0]) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MIN] + if !ok { + return fmt.Errorf("missing resource size min") + } + dlr.SizeMin = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MAX] + if !ok { + return fmt.Errorf("missing resource size max") + } + dlr.SizeMax = native.Uint64(attr.Value) + + // optional attributes + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_OCC] + if ok { + dlr.OCCSize = native.Uint64(attr.Value) + dlr.OCCValid = true + } + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_VALID] + if ok { + dlr.SizeValid = uint8(attr.Value[0]) != 0 + } + + dlr.SizeNew = dlr.Size + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_NEW] + if ok { + dlr.SizeNew = native.Uint64(attr.Value) + } + + dlr.PendingChange = dlr.Size != dlr.SizeNew + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST] + if ok { + // handle nested resources recursively + subResources, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + + for _, subresource := range subResources { + resource := DevlinkResource{Parent: dlr} + attrs, err := nl.ParseRouteAttrAsMap(subresource.Value) + if err != nil { + return err + } + err = resource.parseAttributes(attrs) + if err != nil { + return fmt.Errorf("failed to parse child resource, parent:%s.
%w", dlr.Name, err) + } + dlr.Children = append(dlr.Children, resource) + } + } + return nil +} + +// DevlinkResources represents all devlink resources of a devlink device +type DevlinkResources struct { + Bus string + Device string + Resources []DevlinkResource +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkResources, returns error if occurred +func (dlrs *DevlinkResources) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error { + var attr syscall.NetlinkRouteAttr + var ok bool + + // Bus + attr, ok = attrs[nl.DEVLINK_ATTR_BUS_NAME] + if !ok { + return fmt.Errorf("missing bus name") + } + dlrs.Bus = nl.BytesToString(attr.Value) + + // Device + attr, ok = attrs[nl.DEVLINK_ATTR_DEV_NAME] + if !ok { + return fmt.Errorf("missing device name") + } + dlrs.Device = nl.BytesToString(attr.Value) + + // Resource List + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST] + if !ok { + return fmt.Errorf("missing resource list") + } + + resourceAttrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + + for _, resourceAttr := range resourceAttrs { + resource := DevlinkResource{} + attrs, err := nl.ParseRouteAttrAsMap(resourceAttr.Value) + if err != nil { + return err + } + err = resource.parseAttributes(attrs) + if err != nil { + return fmt.Errorf("failed to parse root resources, %w", err) + } + dlrs.Resources = append(dlrs.Resources, resource) + } + + return nil +} + +// DevlinkParam represents a parameter of the device +type DevlinkParam struct { + Name string + IsGeneric bool + Type uint8 // possible values are in nl.DEVLINK_PARAM_TYPE_* constants + Values []DevlinkParamValue +} + +// DevlinkParamValue contains values of the parameter +// Data field contains specific type which can be cast using info from the DevlinkParam.Type field +type DevlinkParamValue struct { + rawData []byte + Data interface{} + CMODE uint8 // possible values are in nl.DEVLINK_PARAM_CMODE_* constants +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkParam, returns error if occurred +func (dlp *DevlinkParam) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + var valuesList [][]syscall.NetlinkRouteAttr + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM: + nattrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + for _, nattr := range nattrs { + switch nattr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM_NAME: + dlp.Name = nl.BytesToString(nattr.Value) + case nl.DEVLINK_ATTR_PARAM_GENERIC: + dlp.IsGeneric = true + case nl.DEVLINK_ATTR_PARAM_TYPE: + if len(nattr.Value) == 1 { + dlp.Type = nattr.Value[0] + } + case nl.DEVLINK_ATTR_PARAM_VALUES_LIST: + nnattrs, err := nl.ParseRouteAttr(nattr.Value) + if err != nil { + return err + } + valuesList = append(valuesList, nnattrs) + } + } + } + } + for _, valAttr := range valuesList { + v := DevlinkParamValue{} + if err := v.parseAttributes(valAttr, dlp.Type); err != nil { + return err + } + dlp.Values = append(dlp.Values, v) + } + return nil +} + +func (dlpv *DevlinkParamValue) parseAttributes(attrs []syscall.NetlinkRouteAttr, paramType uint8) error { + for _, attr := range attrs { + nattrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + var rawData []byte + for _, nattr := range nattrs { + switch nattr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM_VALUE_DATA: + rawData = nattr.Value + case nl.DEVLINK_ATTR_PARAM_VALUE_CMODE: + if len(nattr.Value) == 1 { + dlpv.CMODE = nattr.Value[0] + } + } + } + switch
paramType { + case nl.DEVLINK_PARAM_TYPE_U8: + dlpv.Data = uint8(0) + if rawData != nil && len(rawData) == 1 { + dlpv.Data = uint8(rawData[0]) + } + case nl.DEVLINK_PARAM_TYPE_U16: + dlpv.Data = uint16(0) + if rawData != nil { + dlpv.Data = native.Uint16(rawData) + } + case nl.DEVLINK_PARAM_TYPE_U32: + dlpv.Data = uint32(0) + if rawData != nil { + dlpv.Data = native.Uint32(rawData) + } + case nl.DEVLINK_PARAM_TYPE_STRING: + dlpv.Data = "" + if rawData != nil { + dlpv.Data = nl.BytesToString(rawData) + } + case nl.DEVLINK_PARAM_TYPE_BOOL: + dlpv.Data = rawData != nil + } + } + return nil +} + func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) { devices := make([]*DevlinkDevice, 0, len(msgs)) for _, m := range msgs { @@ -443,6 +707,173 @@ func (h *Handle) DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint return port, err } +// DevlinkGetDeviceResources returns devlink device resources +func DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) { + return pkgHandle.DevlinkGetDeviceResources(bus, device) +} + +// DevlinkGetDeviceResources returns devlink device resources +func (h *Handle) DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_RESOURCE_DUMP, bus, device) + if err != nil { + return nil, err + } + + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + + var resources DevlinkResources + for _, m := range respmsg { + attrs, err := nl.ParseRouteAttrAsMap(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + resources.parseAttributes(attrs) + } + + return &resources, nil +} + +// DevlinkGetDeviceParams returns parameters for devlink device +// Equivalent to: `devlink dev param show <bus>/<device>` +func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device) + if err != nil { + return nil, err + } + req.Flags |= unix.NLM_F_DUMP + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + var params []*DevlinkParam + for _, m := range respmsg { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + p := &DevlinkParam{} + if err := p.parseAttributes(attrs); err != nil { + return nil, err + } + params = append(params, p) + } + + return params, nil +} + +// DevlinkGetDeviceParams returns parameters for devlink device +// Equivalent to: `devlink dev param show <bus>/<device>` +func DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { + return pkgHandle.DevlinkGetDeviceParams(bus, device) +} + +// DevlinkGetDeviceParamByName returns a specific parameter for devlink device +// Equivalent to: `devlink dev param show <bus>/<device> name <param>` +func (h *Handle) DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device) + if err != nil { + return nil, err + } + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param))) + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + if len(respmsg) == 0 { + return nil, fmt.Errorf("unexpected response") + } + attrs, err := nl.ParseRouteAttr(respmsg[0][nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + p := &DevlinkParam{} + if err := p.parseAttributes(attrs); err != nil { + return nil, err + } + return p, nil +} + +//
DevlinkGetDeviceParamByName returns a specific parameter for devlink device +// Equivalent to: `devlink dev param show <bus>/<device> name <param>` +func DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) { + return pkgHandle.DevlinkGetDeviceParamByName(bus, device, param) +} + +// DevlinkSetDeviceParam sets a specific parameter for devlink device +// Equivalent to: `devlink dev param set <bus>/<device> name <param> cmode <cmode> value <value>` +// cmode argument should contain valid cmode value as uint8, modes are defined in nl.DEVLINK_PARAM_CMODE_* constants +// value argument should have one of the following types: uint8, uint16, uint32, string, bool +func (h *Handle) DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error { + // retrieve the param type + p, err := h.DevlinkGetDeviceParamByName(bus, device, param) + if err != nil { + return fmt.Errorf("failed to get device param: %v", err) + } + paramType := p.Type + + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_SET, bus, device) + if err != nil { + return err + } + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_TYPE, nl.Uint8Attr(paramType))) + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param))) + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_CMODE, nl.Uint8Attr(cmode))) + + var valueAsBytes []byte + switch paramType { + case nl.DEVLINK_PARAM_TYPE_U8: + v, ok := value.(uint8) + if !ok { + return fmt.Errorf("unexpected value type required: uint8, actual: %T", value) + } + valueAsBytes = nl.Uint8Attr(v) + case nl.DEVLINK_PARAM_TYPE_U16: + v, ok := value.(uint16) + if !ok { + return fmt.Errorf("unexpected value type required: uint16, actual: %T", value) + } + valueAsBytes = nl.Uint16Attr(v) + case nl.DEVLINK_PARAM_TYPE_U32: + v, ok := value.(uint32) + if !ok { + return fmt.Errorf("unexpected value type required: uint32, actual: %T", value) + } + valueAsBytes = nl.Uint32Attr(v) + case nl.DEVLINK_PARAM_TYPE_STRING: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected value type required: string, actual: %T", value) + } + valueAsBytes = nl.ZeroTerminated(v) + case nl.DEVLINK_PARAM_TYPE_BOOL: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected value type required: bool, actual: %T", value) + } + if v { + valueAsBytes = []byte{} + } + default: + return fmt.Errorf("unsupported parameter type: %d", paramType) + } + if valueAsBytes != nil { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_DATA, valueAsBytes)) + } + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevlinkSetDeviceParam sets a specific parameter for devlink device +// Equivalent to: `devlink dev param set <bus>/<device> name <param> cmode <cmode> value <value>` +// cmode argument should contain valid cmode value as uint8, modes are defined in nl.DEVLINK_PARAM_CMODE_* constants +// value argument should have one of the following types: uint8, uint16, uint32, string, bool +func DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error { + return pkgHandle.DevlinkSetDeviceParam(bus, device, param, cmode, value) +} + // DevLinkGetPortByIndex provides a pointer to devlink port and nil error, // otherwise returns an error code.
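A short sketch of how the new parameter getters/setters compose; the "pci" bus, the device address, and the "enable_sriov" generic parameter are illustrative assumptions, as is the driverinit cmode constant picked from the nl.DEVLINK_PARAM_CMODE_* family referenced above.

    package main

    import (
        "fmt"
        "log"

        "github.com/vishvananda/netlink"
        "github.com/vishvananda/netlink/nl"
    )

    func main() {
        // Look the parameter up first; DevlinkSetDeviceParam does the same
        // internally to learn the value's wire type.
        p, err := netlink.DevlinkGetDeviceParamByName("pci", "0000:03:00.0", "enable_sriov")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("param %s (type %d): %+v\n", p.Name, p.Type, p.Values)

        // The Go type of the value must match p.Type; passing a bool here
        // assumes DEVLINK_PARAM_TYPE_BOOL.
        err = netlink.DevlinkSetDeviceParam("pci", "0000:03:00.0", "enable_sriov",
            nl.DEVLINK_PARAM_CMODE_DRIVERINIT, true)
        if err != nil {
            log.Fatal(err)
        }
    }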
func DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) { diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index 2d798b0fb..84e1ca7a4 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -19,6 +19,7 @@ type FilterAttrs struct { Parent uint32 Priority uint16 // lower is higher priority Protocol uint16 // unix.ETH_P_* + Chain *uint32 } func (q FilterAttrs) String() string { @@ -27,6 +28,11 @@ func (q FilterAttrs) String() string { type TcAct int32 +const ( + TC_ACT_EXT_SHIFT = 28 + TC_ACT_EXT_VAL_MASK = (1 << TC_ACT_EXT_SHIFT) - 1 +) + const ( TC_ACT_UNSPEC TcAct = -1 TC_ACT_OK TcAct = 0 @@ -40,6 +46,22 @@ const ( TC_ACT_JUMP TcAct = 0x10000000 ) +func getTcActExt(local int32) int32 { + return local << TC_ACT_EXT_SHIFT +} + +func getTcActGotoChain() TcAct { + return TcAct(getTcActExt(2)) +} + +func getTcActExtOpcode(combined int32) int32 { + return combined & (^TC_ACT_EXT_VAL_MASK) +} + +func TcActExtCmp(combined int32, opcode int32) bool { + return getTcActExtOpcode(combined) == opcode +} + func (a TcAct) String() string { switch a { case TC_ACT_UNSPEC: @@ -63,6 +85,9 @@ func (a TcAct) String() string { case TC_ACT_JUMP: return "jump" } + if TcActExtCmp(int32(a), int32(getTcActGotoChain())) { + return "goto" + } return fmt.Sprintf("0x%x", int32(a)) } @@ -93,17 +118,32 @@ func (a TcPolAct) String() string { } type ActionAttrs struct { - Index int - Capab int - Action TcAct - Refcnt int - Bindcnt int + Index int + Capab int + Action TcAct + Refcnt int + Bindcnt int + Statistics *ActionStatistic + Timestamp *ActionTimestamp } func (q ActionAttrs) String() string { return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt) } +type ActionTimestamp struct { + Installed uint64 + LastUsed uint64 + Expires uint64 + FirstUsed uint64 +} + +func (t ActionTimestamp) String() string { + return fmt.Sprintf("Installed %d LastUsed %d Expires %d FirstUsed %d", t.Installed, t.LastUsed, t.Expires, t.FirstUsed) +} + +type ActionStatistic ClassStatistics + // Action represents an action in any supported filter. 
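The TC_ACT_EXT_* helpers above implement tc's extended opcodes: the top four bits of the 32-bit action value carry the opcode (goto-chain is opcode 2) and the low 28 bits carry the operand, i.e. the chain index. A self-contained sketch of that bit layout, with the constants mirrored locally since the helpers are unexported:

    package main

    import "fmt"

    func main() {
        const shift = 28             // mirrors TC_ACT_EXT_SHIFT
        const valMask = 1<<shift - 1 // mirrors TC_ACT_EXT_VAL_MASK

        gotoChain := int32(2 << shift) // the value getTcActGotoChain returns

        combined := gotoChain | 7            // encodes "goto chain 7"
        opcode := combined &^ int32(valMask) // what getTcActExtOpcode extracts
        chain := combined & int32(valMask)   // what lands in GenericAction.Chain

        fmt.Println(opcode == gotoChain, chain) // true 7
    }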
type Action interface { Attrs() *ActionAttrs @@ -112,6 +152,7 @@ type Action interface { type GenericAction struct { ActionAttrs + Chain int32 } func (action *GenericAction) Type() string { @@ -275,6 +316,7 @@ type SkbEditAction struct { PType *uint16 Priority *uint32 Mark *uint32 + Mask *uint32 } func (action *SkbEditAction) Type() string { @@ -348,6 +390,7 @@ type FwFilter struct { InDev string Mask uint32 Police *PoliceAction + Actions []Action } func (filter *FwFilter) Attrs() *FilterAttrs { @@ -390,3 +433,30 @@ func (filter *GenericFilter) Attrs() *FilterAttrs { func (filter *GenericFilter) Type() string { return filter.FilterType } + +type PeditAction struct { + ActionAttrs + Proto uint8 + SrcMacAddr net.HardwareAddr + DstMacAddr net.HardwareAddr + SrcIP net.IP + DstIP net.IP + SrcPort uint16 + DstPort uint16 +} + +func (p *PeditAction) Attrs() *ActionAttrs { + return &p.ActionAttrs +} + +func (p *PeditAction) Type() string { + return "pedit" +} + +func NewPeditAction() *PeditAction { + return &PeditAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index 4c6d1cf7d..87cd18f8e 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -41,6 +41,7 @@ type U32 struct { RedirIndex int Sel *TcU32Sel Actions []Action + Police *PoliceAction } func (filter *U32) Attrs() *FilterAttrs { @@ -64,6 +65,11 @@ type Flower struct { EncSrcIPMask net.IPMask EncDestPort uint16 EncKeyId uint32 + SkipHw bool + SkipSw bool + IPProto *nl.IPProto + DestPort uint16 + SrcPort uint16 Actions []Action } @@ -129,6 +135,39 @@ func (filter *Flower) encode(parent *nl.RtAttr) error { if filter.EncKeyId != 0 { parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId)) } + if filter.IPProto != nil { + ipproto := *filter.IPProto + parent.AddRtAttr(nl.TCA_FLOWER_KEY_IP_PROTO, ipproto.Serialize()) + if filter.SrcPort != 0 { + switch ipproto { + case nl.IPPROTO_TCP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_SRC, htons(filter.SrcPort)) + case nl.IPPROTO_UDP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_SRC, htons(filter.SrcPort)) + case nl.IPPROTO_SCTP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_SRC, htons(filter.SrcPort)) + } + } + if filter.DestPort != 0 { + switch ipproto { + case nl.IPPROTO_TCP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_DST, htons(filter.DestPort)) + case nl.IPPROTO_UDP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_DST, htons(filter.DestPort)) + case nl.IPPROTO_SCTP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_DST, htons(filter.DestPort)) + } + } + } + + var flags uint32 = 0 + if filter.SkipHw { + flags |= nl.TCA_CLS_FLAGS_SKIP_HW + } + if filter.SkipSw { + flags |= nl.TCA_CLS_FLAGS_SKIP_SW + } + parent.AddRtAttr(nl.TCA_FLOWER_FLAGS, htonl(flags)) actionsAttr := parent.AddRtAttr(nl.TCA_FLOWER_ACT, nil) if err := EncodeActions(actionsAttr, filter.Actions); err != nil { @@ -162,6 +201,14 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { filter.EncDestPort = ntohs(datum.Value) case nl.TCA_FLOWER_KEY_ENC_KEY_ID: filter.EncKeyId = ntohl(datum.Value) + case nl.TCA_FLOWER_KEY_IP_PROTO: + val := new(nl.IPProto) + *val = nl.IPProto(datum.Value[0]) + filter.IPProto = val + case nl.TCA_FLOWER_KEY_TCP_SRC, nl.TCA_FLOWER_KEY_UDP_SRC, nl.TCA_FLOWER_KEY_SCTP_SRC: + filter.SrcPort = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_TCP_DST, nl.TCA_FLOWER_KEY_UDP_DST, 
nl.TCA_FLOWER_KEY_SCTP_DST: + filter.DestPort = ntohs(datum.Value) case nl.TCA_FLOWER_ACT: tables, err := nl.ParseRouteAttr(datum.Value) if err != nil { @@ -171,6 +218,16 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { if err != nil { return err } + case nl.TCA_FLOWER_FLAGS: + attr := nl.DeserializeUint32Bitfield(datum.Value) + skipSw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_SW + skipHw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_HW + if skipSw != 0 { + filter.SkipSw = true + } + if skipHw != 0 { + filter.SkipHw = true + } } } return nil @@ -185,19 +242,7 @@ func FilterDel(filter Filter) error { // FilterDel will delete a filter from the system. // Equivalent to: `tc filter del $filter` func (h *Handle) FilterDel(filter Filter) error { - req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK) - base := filter.Attrs() - msg := &nl.TcMsg{ - Family: nl.FAMILY_ALL, - Ifindex: int32(base.LinkIndex), - Handle: base.Handle, - Parent: base.Parent, - Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), - } - req.AddData(msg) - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err + return h.filterModify(filter, unix.RTM_DELTFILTER, 0) } // FilterAdd will add a filter to the system. @@ -209,7 +254,7 @@ func FilterAdd(filter Filter) error { // FilterAdd will add a filter to the system. // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { - return h.filterModify(filter, unix.NLM_F_CREATE|unix.NLM_F_EXCL) + return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL) } // FilterReplace will replace a filter. @@ -221,11 +266,11 @@ func FilterReplace(filter Filter) error { // FilterReplace will replace a filter. // Equivalent to: `tc filter replace $filter` func (h *Handle) FilterReplace(filter Filter) error { - return h.filterModify(filter, unix.NLM_F_CREATE) + return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE) } -func (h *Handle) filterModify(filter Filter, flags int) error { - req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK) +func (h *Handle) filterModify(filter Filter, proto, flags int) error { + req := h.newNetlinkRequest(proto, flags|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -235,6 +280,9 @@ func (h *Handle) filterModify(filter Filter, flags int) error { Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), } req.AddData(msg) + if filter.Attrs().Chain != nil { + req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(*filter.Attrs().Chain))) + } req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type()))) options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) @@ -284,6 +332,12 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if filter.Link != 0 { options.AddRtAttr(nl.TCA_U32_LINK, nl.Uint32Attr(filter.Link)) } + if filter.Police != nil { + police := options.AddRtAttr(nl.TCA_U32_POLICE, nil) + if err := encodePolice(police, filter.Police); err != nil { + return err + } + } actionsAttr := options.AddRtAttr(nl.TCA_U32_ACT, nil) // backwards compatibility if filter.RedirIndex != 0 { @@ -312,6 +366,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error { native.PutUint32(b, filter.ClassId) options.AddRtAttr(nl.TCA_FW_CLASSID, b) } + actionsAttr := options.AddRtAttr(nl.TCA_FW_ACT, nil) + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { + return err + } case *BpfFilter: var bpfFlags uint32 if filter.ClassId != 0 { @@ -340,7 +398,6 @@ func (h *Handle) filterModify(filter
Filter, flags int) error { return err } } - req.AddData(options) _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err @@ -446,6 +503,10 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { default: detailed = true } + case nl.TCA_CHAIN: + val := new(uint32) + *val = native.Uint32(attr.Value) + base.Chain = val } } // only return the detailed version of the filter @@ -474,6 +535,14 @@ func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) { attrs.Bindcnt = int(tcgen.Bindcnt) } +func toTimeStamp(tcf *nl.Tcf) *ActionTimestamp { + return &ActionTimestamp{ + Installed: tcf.Install, + LastUsed: tcf.LastUse, + Expires: tcf.Expires, + FirstUsed: tcf.FirstUse} +} + func encodePolice(attr *nl.RtAttr, action *PoliceAction) error { var rtab [256]uint32 var ptab [256]uint32 @@ -597,6 +666,9 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { if action.Mark != nil { aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark)) } + if action.Mask != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_MASK, nl.Uint32Attr(*action.Mask)) + } case *ConnmarkAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -635,6 +707,29 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { gen := nl.TcGen{} toTcGen(action.Attrs(), &gen) aopts.AddRtAttr(nl.TCA_GACT_PARMS, gen.Serialize()) + case *PeditAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + pedit := nl.TcPedit{} + if action.SrcMacAddr != nil { + pedit.SetEthSrc(action.SrcMacAddr) + } + if action.DstMacAddr != nil { + pedit.SetEthDst(action.DstMacAddr) + } + if action.SrcIP != nil { + pedit.SetSrcIP(action.SrcIP) + } + if action.DstIP != nil { + pedit.SetDstIP(action.DstIP) + } + if action.SrcPort != 0 { + pedit.SetSrcPort(action.SrcPort, action.Proto) + } + if action.DstPort != 0 { + pedit.SetDstPort(action.DstPort, action.Proto) + } + pedit.Encode(table) } } return nil @@ -668,6 +763,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { for _, table := range tables { var action Action var actionType string + var actionnStatistic *ActionStatistic + var actionTimestamp *ActionTimestamp aattrs, err := nl.ParseRouteAttr(table.Value) if err != nil { return nil, err @@ -695,6 +792,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = &SkbEditAction{} case "police": action = &PoliceAction{} + case "pedit": + action = &PeditAction{} default: break nextattr } @@ -713,7 +812,11 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { toAttrs(&mirred.TcGen, action.Attrs()) action.(*MirredAction).Ifindex = int(mirred.Ifindex) action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction) + case nl.TCA_MIRRED_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } + case "tunnel_key": switch adatum.Attr.Type { case nl.TCA_TUNNEL_KEY_PARMS: @@ -729,6 +832,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*TunnelKeyAction).DstAddr = adatum.Value[:] case nl.TCA_TUNNEL_KEY_ENC_DST_PORT: action.(*TunnelKeyAction).DestPort = ntohs(adatum.Value) + case nl.TCA_TUNNEL_KEY_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "skbedit": switch adatum.Attr.Type { @@ -739,6 +845,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case nl.TCA_SKBEDIT_MARK: mark := native.Uint32(adatum.Value[0:4]) action.(*SkbEditAction).Mark = &mark + case nl.TCA_SKBEDIT_MASK: + mask := native.Uint32(adatum.Value[0:4]) + 
action.(*SkbEditAction).Mask = &mask case nl.TCA_SKBEDIT_PRIORITY: priority := native.Uint32(adatum.Value[0:4]) action.(*SkbEditAction).Priority = &priority @@ -748,6 +857,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case nl.TCA_SKBEDIT_QUEUE_MAPPING: mapping := native.Uint16(adatum.Value[0:2]) action.(*SkbEditAction).QueueMapping = &mapping + case nl.TCA_SKBEDIT_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "bpf": switch adatum.Attr.Type { @@ -758,6 +870,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4])) case nl.TCA_ACT_BPF_NAME: action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1]) + case nl.TCA_ACT_BPF_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "connmark": switch adatum.Attr.Type { @@ -766,6 +881,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*ConnmarkAction).ActionAttrs = ActionAttrs{} toAttrs(&connmark.TcGen, action.Attrs()) action.(*ConnmarkAction).Zone = connmark.Zone + case nl.TCA_CONNMARK_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "csum": switch adatum.Attr.Type { @@ -774,19 +892,36 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*CsumAction).ActionAttrs = ActionAttrs{} toAttrs(&csum.TcGen, action.Attrs()) action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags) + case nl.TCA_CSUM_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "gact": switch adatum.Attr.Type { case nl.TCA_GACT_PARMS: gen := *nl.DeserializeTcGen(adatum.Value) toAttrs(&gen, action.Attrs()) + if action.Attrs().Action.String() == "goto" { + action.(*GenericAction).Chain = TC_ACT_EXT_VAL_MASK & gen.Action + } + case nl.TCA_GACT_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "police": parsePolice(adatum, action.(*PoliceAction)) } } + case nl.TCA_ACT_STATS: + s, err := parseTcStats2(aattr.Value) + if err != nil { + return nil, err + } + actionnStatistic = (*ActionStatistic)(s) } } + action.Attrs().Statistics = actionnStatistic + action.Attrs().Timestamp = actionTimestamp actions = append(actions, action) } return actions, nil @@ -824,6 +959,13 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) u32.RedirIndex = int(action.Ifindex) } } + case nl.TCA_U32_POLICE: + var police PoliceAction + adata, _ := nl.ParseRouteAttr(datum.Value) + for _, aattr := range adata { + parsePolice(aattr, &police) + } + u32.Police = &police case nl.TCA_U32_CLASSID: u32.ClassId = native.Uint32(datum.Value) case nl.TCA_U32_DIVISOR: @@ -855,6 +997,15 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { parsePolice(aattr, &police) } fw.Police = &police + case nl.TCA_FW_ACT: + tables, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return detailed, err + } + fw.Actions, err = parseActions(tables) + if err != nil { + return detailed, err + } } } return detailed, nil diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go index cc94a4e00..3fe03642e 100644 --- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go @@ -79,6 +79,10 @@ func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, 
qos int) error { return ErrNotImplemented } +func (h *Handle) LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + return ErrNotImplemented +} + func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { return ErrNotImplemented } @@ -163,6 +167,22 @@ func (h *Handle) LinkSetGroup(link Link, group int) error { return ErrNotImplemented } +func (h *Handle) LinkSetGSOMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func (h *Handle) LinkSetGROMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func (h *Handle) LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { return ErrNotImplemented } @@ -243,6 +263,10 @@ func (h *Handle) RouteAppend(route *Route) error { return ErrNotImplemented } +func (h *Handle) RouteChange(route *Route) error { + return ErrNotImplemented +} + func (h *Handle) RouteDel(route *Route) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/inet_diag.go b/vendor/github.com/vishvananda/netlink/inet_diag.go index bee391a80..2904d9642 100644 --- a/vendor/github.com/vishvananda/netlink/inet_diag.go +++ b/vendor/github.com/vishvananda/netlink/inet_diag.go @@ -21,6 +21,10 @@ const ( INET_DIAG_BBRINFO INET_DIAG_CLASS_ID INET_DIAG_MD5SIG + INET_DIAG_ULP_INFO + INET_DIAG_SK_BPF_STORAGES + INET_DIAG_CGROUP_ID + INET_DIAG_SOCKOPT INET_DIAG_MAX ) @@ -29,3 +33,8 @@ type InetDiagTCPInfoResp struct { TCPInfo *TCPInfo TCPBBRInfo *TCPBBRInfo } + +type InetDiagUDPInfoResp struct { + InetDiagMsg *Socket + Memory *MemInfo +} diff --git a/vendor/github.com/vishvananda/netlink/ipset_linux.go b/vendor/github.com/vishvananda/netlink/ipset_linux.go index 1f4eae81c..f4c05229f 100644 --- a/vendor/github.com/vishvananda/netlink/ipset_linux.go +++ b/vendor/github.com/vishvananda/netlink/ipset_linux.go @@ -67,11 +67,13 @@ type IpsetCreateOptions struct { Comments bool Skbinfo bool - Revision uint8 - IPFrom net.IP - IPTo net.IP - PortFrom uint16 - PortTo uint16 + Family uint8 + Revision uint8 + IPFrom net.IP + IPTo net.IP + PortFrom uint16 + PortTo uint16 + MaxElements uint32 } // IpsetProtocol returns the ipset protocol version from the kernel @@ -94,6 +96,11 @@ func IpsetFlush(setname string) error { return pkgHandle.IpsetFlush(setname) } +// IpsetSwap swaps two ipsets. +func IpsetSwap(setname, othersetname string) error { + return pkgHandle.IpsetSwap(setname, othersetname) +} + // IpsetList dumps a specific ipset. func IpsetList(setname string) (*IPSetResult, error) { return pkgHandle.IpsetList(setname) } @@ -114,6 +121,11 @@ func IpsetDel(setname string, entry *IPSetEntry) error { return pkgHandle.IpsetDel(setname, entry) } +// IpsetTest tests whether an entry is in a set or not.
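For the new membership test, a brief hedged sketch; the set name and the address are examples, and the set (a hash:ip set here) must already exist:

    package main

    import (
        "fmt"
        "log"
        "net"

        "github.com/vishvananda/netlink"
    )

    func main() {
        // Test whether 192.0.2.10 is in an existing set named "allowlist".
        entry := &netlink.IPSetEntry{IP: net.ParseIP("192.0.2.10").To4()}
        found, err := netlink.IpsetTest("allowlist", entry)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("entry present:", found)
    }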
+func IpsetTest(setname string, entry *IPSetEntry) (bool, error) { + return pkgHandle.IpsetTest(setname, entry) +} + func (h *Handle) IpsetProtocol() (protocol uint8, minVersion uint8, err error) { req := h.newIpsetRequest(nl.IPSET_CMD_PROTOCOL) msgs, err := req.Execute(unix.NETLINK_NETFILTER, 0) @@ -153,11 +165,18 @@ func (h *Handle) IpsetCreate(setname, typename string, options IpsetCreateOption data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_FROM|int(nl.NLA_F_NET_BYTEORDER), buf[:2])) data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_TO|int(nl.NLA_F_NET_BYTEORDER), buf[2:])) default: - family = unix.AF_INET + family = options.Family + if family == 0 { + family = unix.AF_INET + } } req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_FAMILY, nl.Uint8Attr(family))) + if options.MaxElements != 0 { + data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_MAXELEM | nl.NLA_F_NET_BYTEORDER, Value: options.MaxElements}) + } + if timeout := options.Timeout; timeout != nil { data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER, Value: *timeout}) } @@ -197,6 +216,14 @@ func (h *Handle) IpsetFlush(setname string) error { return err } +func (h *Handle) IpsetSwap(setname, othersetname string) error { + req := h.newIpsetRequest(nl.IPSET_CMD_SWAP) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_TYPENAME, nl.ZeroTerminated(othersetname))) + _, err := ipsetExecute(req) + return err +} + func (h *Handle) IpsetList(name string) (*IPSetResult, error) { req := h.newIpsetRequest(nl.IPSET_CMD_LIST) req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(name))) @@ -236,18 +263,23 @@ func (h *Handle) IpsetDel(setname string, entry *IPSetEntry) error { return h.ipsetAddDel(nl.IPSET_CMD_DEL, setname, entry) } -func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error { - req := h.newIpsetRequest(nlCmd) - req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) - - if entry.Comment != "" { - req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_COMMENT, nl.ZeroTerminated(entry.Comment))) +func encodeIP(ip net.IP) (*nl.RtAttr, error) { + typ := int(nl.NLA_F_NET_BYTEORDER) + if ip4 := ip.To4(); ip4 != nil { + typ |= nl.IPSET_ATTR_IPADDR_IPV4 + ip = ip4 + } else { + typ |= nl.IPSET_ATTR_IPADDR_IPV6 } + return nl.NewRtAttr(typ, ip), nil +} + +func buildEntryData(entry *IPSetEntry) (*nl.RtAttr, error) { data := nl.NewRtAttr(nl.IPSET_ATTR_DATA|int(nl.NLA_F_NESTED), nil) - if !entry.Replace { - req.Flags |= unix.NLM_F_EXCL + if entry.Comment != "" { + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_COMMENT, nl.ZeroTerminated(entry.Comment))) } if entry.Timeout != nil { @@ -255,7 +287,10 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error } if entry.IP != nil { - nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP) + nestedData, err := encodeIP(entry.IP) + if err != nil { + return nil, err + } data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NESTED), nestedData.Serialize())) } @@ -268,7 +303,10 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error } if entry.IP2 != nil { - nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP2) + nestedData, err := encodeIP(entry.IP2) + if err != nil { + return nil, err + } data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP2|int(nl.NLA_F_NESTED), nestedData.Serialize())) } @@ -295,14 +333,53 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry 
*IPSetEntry) error if entry.Mark != nil { data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_MARK | nl.NLA_F_NET_BYTEORDER, Value: *entry.Mark}) } + return data, nil +} +func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error { + req := h.newIpsetRequest(nlCmd) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) + + if !entry.Replace { + req.Flags |= unix.NLM_F_EXCL + } + + data, err := buildEntryData(entry) + if err != nil { + return err + } data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_LINENO | nl.NLA_F_NET_BYTEORDER, Value: 0}) req.AddData(data) - _, err := ipsetExecute(req) + _, err = ipsetExecute(req) return err } +func (h *Handle) IpsetTest(setname string, entry *IPSetEntry) (bool, error) { + req := h.newIpsetRequest(nl.IPSET_CMD_TEST) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) + + if !entry.Replace { + req.Flags |= unix.NLM_F_EXCL + } + + data, err := buildEntryData(entry) + if err != nil { + return false, err + } + req.AddData(data) + + _, err = ipsetExecute(req) + if err != nil { + if err == nl.IPSetError(nl.IPSET_ERR_EXIST) { + // not exist + return false, nil + } + return false, err + } + return true, nil +} + func (h *Handle) newIpsetRequest(cmd int) *nl.NetlinkRequest { req := h.newNetlinkRequest(cmd|(unix.NFNL_SUBSYS_IPSET<<8), nl.GetIpsetFlags(cmd)) @@ -466,7 +543,7 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) { case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED: for attr := range nl.ParseAttributes(attr.Value) { switch attr.Type { - case nl.IPSET_ATTR_IP: + case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6: entry.IP = net.IP(attr.Value) default: log.Printf("unknown nested ADT attribute from kernel: %+v", attr) @@ -475,7 +552,7 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) { case nl.IPSET_ATTR_IP2 | nl.NLA_F_NESTED: for attr := range nl.ParseAttributes(attr.Value) { switch attr.Type { - case nl.IPSET_ATTR_IP: + case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6: entry.IP2 = net.IP(attr.Value) default: log.Printf("unknown nested ADT attribute from kernel: %+v", attr) diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index 33c872336..f820cdb67 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -22,34 +22,41 @@ type ( // LinkAttrs represents data shared by most link types type LinkAttrs struct { - Index int - MTU int - TxQLen int // Transmit Queue Length - Name string - HardwareAddr net.HardwareAddr - Flags net.Flags - RawFlags uint32 - ParentIndex int // index of the parent link device - MasterIndex int // must be the index of a bridge - Namespace interface{} // nil | NsPid | NsFd - Alias string - Statistics *LinkStatistics - Promisc int - Allmulti int - Multi int - Xdp *LinkXdp - EncapType string - Protinfo *Protinfo - OperState LinkOperState - PhysSwitchID int - NetNsID int - NumTxQueues int - NumRxQueues int - GSOMaxSize uint32 - GSOMaxSegs uint32 - Vfs []VfInfo // virtual functions available on link - Group uint32 - Slave LinkSlave + Index int + MTU int + TxQLen int // Transmit Queue Length + Name string + HardwareAddr net.HardwareAddr + Flags net.Flags + RawFlags uint32 + ParentIndex int // index of the parent link device + MasterIndex int // must be the index of a bridge + Namespace interface{} // nil | NsPid | NsFd + Alias string + AltNames []string + Statistics *LinkStatistics + Promisc int + Allmulti int + Multi int + Xdp 
*LinkXdp + EncapType string + Protinfo *Protinfo + OperState LinkOperState + PhysSwitchID int + NetNsID int + NumTxQueues int + NumRxQueues int + TSOMaxSegs uint32 + TSOMaxSize uint32 + GSOMaxSegs uint32 + GSOMaxSize uint32 + GROMaxSize uint32 + GSOIPv4MaxSize uint32 + GROIPv4MaxSize uint32 + Vfs []VfInfo // virtual functions available on link + Group uint32 + PermHWAddr net.HardwareAddr + Slave LinkSlave } // LinkSlave represents a slave device. @@ -63,6 +70,7 @@ type VfInfo struct { Mac net.HardwareAddr Vlan int Qos int + VlanProto int TxRate int // IFLA_VF_TX_RATE Max TxRate Spoofchk bool LinkState uint32 @@ -265,6 +273,8 @@ type Bridge struct { AgeingTime *uint32 HelloTime *uint32 VlanFiltering *bool + VlanDefaultPVID *uint16 + GroupFwdMask *uint16 } func (bridge *Bridge) Attrs() *LinkAttrs { @@ -308,6 +318,9 @@ type Macvlan struct { // MACAddrs is only populated for Macvlan SOURCE links MACAddrs []net.HardwareAddr + + BCQueueLen uint32 + UsedBCQueueLen uint32 } func (macvlan *Macvlan) Attrs() *LinkAttrs { @@ -350,6 +363,46 @@ func (tuntap *Tuntap) Type() string { return "tuntap" } +type NetkitMode uint32 + +const ( + NETKIT_MODE_L2 NetkitMode = iota + NETKIT_MODE_L3 +) + +type NetkitPolicy int + +const ( + NETKIT_POLICY_FORWARD NetkitPolicy = 0 + NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2 +) + +func (n *Netkit) IsPrimary() bool { + return n.isPrimary +} + +// SetPeerAttrs will not take effect if trying to modify an existing netkit device +func (n *Netkit) SetPeerAttrs(Attrs *LinkAttrs) { + n.peerLinkAttrs = *Attrs +} + +type Netkit struct { + LinkAttrs + Mode NetkitMode + Policy NetkitPolicy + PeerPolicy NetkitPolicy + isPrimary bool + peerLinkAttrs LinkAttrs +} + +func (n *Netkit) Attrs() *LinkAttrs { + return &n.LinkAttrs +} + +func (n *Netkit) Type() string { + return "netkit" +} + // Veth devices must specify PeerName on create type Veth struct { LinkAttrs @@ -703,6 +756,7 @@ const ( BOND_XMIT_HASH_POLICY_LAYER2_3 BOND_XMIT_HASH_POLICY_ENCAP2_3 BOND_XMIT_HASH_POLICY_ENCAP3_4 + BOND_XMIT_HASH_POLICY_VLAN_SRCMAC BOND_XMIT_HASH_POLICY_UNKNOWN ) @@ -712,6 +766,7 @@ var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{ BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3", BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3", BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4", + BOND_XMIT_HASH_POLICY_VLAN_SRCMAC: "vlan+srcmac", } var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{ "layer2": BOND_XMIT_HASH_POLICY_LAYER2, @@ -719,6 +774,7 @@ var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{ "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3, "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3, "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4, + "vlan+srcmac": BOND_XMIT_HASH_POLICY_VLAN_SRCMAC, } // BondLacpRate type @@ -974,16 +1030,18 @@ func (v *VrfSlave) SlaveType() string { // https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/drivers/net/geneve.c#L1209-L1223 type Geneve struct { LinkAttrs - ID uint32 // vni - Remote net.IP - Ttl uint8 - Tos uint8 - Dport uint16 - UdpCsum uint8 - UdpZeroCsum6Tx uint8 - UdpZeroCsum6Rx uint8 - Link uint32 - FlowBased bool + ID uint32 // vni + Remote net.IP + Ttl uint8 + Tos uint8 + Dport uint16 + UdpCsum uint8 + UdpZeroCsum6Tx uint8 + UdpZeroCsum6Rx uint8 + Link uint32 + FlowBased bool + InnerProtoInherit bool + Df GeneveDf } func (geneve *Geneve) Attrs() *LinkAttrs { @@ -994,6 +1052,15 @@ func (geneve *Geneve) Type() string { return "geneve" } +type GeneveDf uint8 + +const ( + GENEVE_DF_UNSET GeneveDf = iota + GENEVE_DF_SET + 
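// Editor's sketch — creating a netkit pair with the new link type declared
// above; not part of the vendored diff. Device names are illustrative, and
// LinkAdd is the package's existing link-creation entry point.
package main

import "github.com/vishvananda/netlink"

func main() {
	nk := &netlink.Netkit{
		LinkAttrs:  netlink.LinkAttrs{Name: "nk0"},
		Mode:       netlink.NETKIT_MODE_L3,
		Policy:     netlink.NETKIT_POLICY_FORWARD,
		PeerPolicy: netlink.NETKIT_POLICY_BLACKHOLE,
	}
	// Peer attributes are honored only at creation time; per the comment on
	// SetPeerAttrs, they cannot be modified on an existing netkit device.
	nk.SetPeerAttrs(&netlink.LinkAttrs{Name: "nk0peer"})
	if err := netlink.LinkAdd(nk); err != nil {
		panic(err)
	}
}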
GENEVE_DF_INHERIT + GENEVE_DF_MAX +) + // Gretap devices must specify LocalIP and RemoteIP on create type Gretap struct { LinkAttrs @@ -1064,6 +1131,7 @@ type Ip6tnl struct { EncapFlags uint16 EncapSport uint16 EncapDport uint16 + FlowBased bool } func (ip6tnl *Ip6tnl) Attrs() *LinkAttrs { @@ -1165,6 +1233,7 @@ type Gretun struct { EncapFlags uint16 EncapSport uint16 EncapDport uint16 + FlowBased bool } func (gretun *Gretun) Attrs() *LinkAttrs { @@ -1208,6 +1277,7 @@ func (gtp *GTP) Type() string { } // Virtual XFRM Interfaces +// // Named "xfrmi" to prevent confusion with XFRM objects type Xfrmi struct { LinkAttrs diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index 276947a00..d713612a9 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -345,6 +345,16 @@ func (h *Handle) BridgeSetVlanFiltering(link Link, on bool) error { return h.linkModify(bridge, unix.NLM_F_ACK) } +func BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + return pkgHandle.BridgeSetVlanDefaultPVID(link, pvid) +} + +func (h *Handle) BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + bridge := link.(*Bridge) + bridge.VlanDefaultPVID = &pvid + return h.linkModify(bridge, unix.NLM_F_ACK) +} + func SetPromiscOn(link Link) error { return pkgHandle.SetPromiscOn(link) } @@ -487,6 +497,58 @@ func (h *Handle) LinkSetAlias(link Link, name string) error { return err } +// LinkAddAltName adds a new alternative name for the link device. +// Equivalent to: `ip link property add $link altname $name` +func LinkAddAltName(link Link, name string) error { + return pkgHandle.LinkAddAltName(link, name) +} + +// LinkAddAltName adds a new alternative name for the link device. +// Equivalent to: `ip link property add $link altname $name` +func (h *Handle) LinkAddAltName(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINKPROP, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil) + data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name)) + + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkDelAltName delete an alternative name for the link device. +// Equivalent to: `ip link property del $link altname $name` +func LinkDelAltName(link Link, name string) error { + return pkgHandle.LinkDelAltName(link, name) +} + +// LinkDelAltName delete an alternative name for the link device. +// Equivalent to: `ip link property del $link altname $name` +func (h *Handle) LinkDelAltName(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_DELLINKPROP, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil) + data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name)) + + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + // LinkSetHardwareAddr sets the hardware address of the link device. 
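// Editor's sketch — managing alternative interface names with the
// LinkAddAltName/LinkDelAltName helpers defined above; not part of the
// vendored diff. The names are illustrative.
package main

import "github.com/vishvananda/netlink"

func main() {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		panic(err)
	}
	// Equivalent to `ip link property add dev eth0 altname uplink-rack1-port3`;
	// altnames may exceed the 15-character IFNAMSIZ limit on regular names.
	if err := netlink.LinkAddAltName(link, "uplink-rack1-port3"); err != nil {
		panic(err)
	}
	if err := netlink.LinkDelAltName(link, "uplink-rack1-port3"); err != nil {
		panic(err)
	}
}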
// Equivalent to: `ip link set $link address $hwaddr` func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { @@ -602,6 +664,43 @@ func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { return err } +// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto` +func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + return pkgHandle.LinkSetVfVlanQosProto(link, vf, vlan, qos, proto) +} + +// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto` +func (h *Handle) LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + vfInfo := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfVlanList := vfInfo.AddRtAttr(nl.IFLA_VF_VLAN_LIST, nil) + + vfmsg := nl.VfVlanInfo{ + VfVlan: nl.VfVlan{ + Vf: uint32(vf), + Vlan: uint32(vlan), + Qos: uint32(qos), + }, + VlanProto: (uint16(proto)>>8)&0xFF | (uint16(proto)&0xFF)<<8, + } + + vfVlanList.AddRtAttr(nl.IFLA_VF_VLAN_INFO, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + // LinkSetVfTxRate sets the tx rate of a vf for the link. // Equivalent to: `ip link set $link vf $vf rate $rate` func LinkSetVfTxRate(link Link, vf, rate int) error { @@ -946,6 +1045,141 @@ func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { return err } +// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device. +// Equivalent to: `ip link set $link gso_max_segs $maxSegs` +func LinkSetGSOMaxSegs(link Link, maxSegs int) error { + return pkgHandle.LinkSetGSOMaxSegs(link, maxSegs) +} + +// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device. +// Equivalent to: `ip link set $link gso_max_segs $maxSegs` +func (h *Handle) LinkSetGSOMaxSegs(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_max_size $maxSize` +func LinkSetGSOMaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGSOMaxSize(link, maxSize) +} + +// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_max_size $maxSize` +func (h *Handle) LinkSetGSOMaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device. 
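// Editor's sketch — the new LinkSetVfVlanQosProto helper defined above; not
// part of the vendored diff. The PF name and VF index are illustrative, and
// 0x88a8 selects 802.1ad (QinQ) as the outer VLAN protocol.
package main

import "github.com/vishvananda/netlink"

func main() {
	pf, err := netlink.LinkByName("enp3s0f0")
	if err != nil {
		panic(err)
	}
	// vf 0, vlan 100, qos 2, proto 802.1ad; the helper byte-swaps proto into
	// network order before serializing it as IFLA_VF_VLAN_INFO.
	if err := netlink.LinkSetVfVlanQosProto(pf, 0, 100, 2, 0x88a8); err != nil {
		panic(err)
	}
}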
+// Equivalent to: `ip link set $link gro_max_size $maxSize` +func LinkSetGROMaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGROMaxSize(link, maxSize) +} + +// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_max_size $maxSize` +func (h *Handle) LinkSetGROMaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize` +func LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGSOIPv4MaxSize(link, maxSize) +} + +// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize` +func (h *Handle) LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize` +func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGROIPv4MaxSize(link, maxSize) +} + +// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device. 
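// Editor's sketch — the GSO/GRO limit setters added in this region, mirroring
// `ip link set DEV gso_max_size N` and friends; not part of the vendored
// diff. The device name and sizes are illustrative.
package main

import "github.com/vishvananda/netlink"

func main() {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		panic(err)
	}
	_ = netlink.LinkSetGSOMaxSize(link, 65536)     // IPv6 GSO byte limit
	_ = netlink.LinkSetGROMaxSize(link, 65536)     // IPv6 GRO byte limit
	_ = netlink.LinkSetGSOIPv4MaxSize(link, 65536) // IPv4 GSO byte limit
	_ = netlink.LinkSetGROIPv4MaxSize(link, 65536) // IPv4 GRO byte limit
}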
+// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize` +func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func boolAttr(val bool) []byte { var v uint8 if val { @@ -1401,6 +1635,21 @@ func (h *Handle) linkModify(link Link, flags int) error { req.AddData(gsoAttr) } + if base.GROMaxSize > 0 { + groAttr := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, nl.Uint32Attr(base.GROMaxSize)) + req.AddData(groAttr) + } + + if base.GSOIPv4MaxSize > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GSOIPv4MaxSize)) + req.AddData(gsoAttr) + } + + if base.GROIPv4MaxSize > 0 { + groAttr := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GROIPv4MaxSize)) + req.AddData(groAttr) + } + if base.Group > 0 { groupAttr := nl.NewRtAttr(unix.IFLA_GROUP, nl.Uint32Attr(base.Group)) req.AddData(groupAttr) @@ -1437,6 +1686,10 @@ func (h *Handle) linkModify(link Link, flags int) error { if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN { data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol))) } + case *Netkit: + if err := addNetkitAttrs(link, linkInfo, flags); err != nil { + return err + } case *Veth: data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil) @@ -1480,15 +1733,9 @@ func (h *Handle) linkModify(link Link, flags int) error { data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) case *Macvlan: - if link.Mode != MACVLAN_MODE_DEFAULT { - data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) - data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) - } + addMacvlanAttrs(link, linkInfo) case *Macvtap: - if link.Mode != MACVLAN_MODE_DEFAULT { - data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) - data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) - } + addMacvtapAttrs(link, linkInfo) case *Geneve: addGeneveAttrs(link, linkInfo) case *Gretap: @@ -1569,6 +1816,13 @@ func (h *Handle) linkByNameDump(name string) (Link, error) { if link.Attrs().Name == name { return link, nil } + + // support finding interfaces also via altnames + for _, altName := range link.Attrs().AltNames { + if altName == name { + return link, nil + } + } } return nil, LinkNotFoundError{fmt.Errorf("Link %s not found", name)} } @@ -1607,6 +1861,9 @@ func (h *Handle) LinkByName(name string) (Link, error) { req.AddData(attr) nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) + if len(name) > 15 { + nameData = nl.NewRtAttr(unix.IFLA_ALT_IFNAME, nl.ZeroTerminated(name)) + } req.AddData(nameData) link, err := execGetLink(req) @@ -1712,9 +1969,6 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.Flags = linkFlags(msg.Flags) base.EncapType = msg.EncapType() base.NetNsID = -1 - if msg.Flags&unix.IFF_PROMISC != 0 { - base.Promisc = 1 - } if msg.Flags&unix.IFF_ALLMULTI != 0 { base.Allmulti = 1 } @@ -1750,6 +2004,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Bridge{} case "vlan": link = &Vlan{} + case "netkit": + link = &Netkit{} case "veth": 
link = &Veth{} case "wireguard": @@ -1807,6 +2063,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } switch linkType { + case "netkit": + parseNetkitData(link, data) case "vlan": parseVlanData(link, data) case "vxlan": @@ -1897,6 +2155,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.Name = string(attr.Value[:len(attr.Value)-1]) case unix.IFLA_MTU: base.MTU = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_PROMISCUITY: + base.Promisc = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_LINK: base.ParentIndex = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_MASTER: @@ -1931,16 +2191,38 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { protinfo := parseProtinfo(attrs) base.Protinfo = &protinfo } + case unix.IFLA_PROP_LIST | unix.NLA_F_NESTED: + attrs, err := nl.ParseRouteAttr(attr.Value[:]) + if err != nil { + return nil, err + } + + base.AltNames = []string{} + for _, attr := range attrs { + if attr.Attr.Type == unix.IFLA_ALT_IFNAME { + base.AltNames = append(base.AltNames, nl.BytesToString(attr.Value)) + } + } case unix.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) case unix.IFLA_PHYS_SWITCH_ID: base.PhysSwitchID = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_GSO_MAX_SIZE: - base.GSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_TSO_MAX_SEGS: + base.TSOMaxSegs = native.Uint32(attr.Value[0:4]) + case unix.IFLA_TSO_MAX_SIZE: + base.TSOMaxSize = native.Uint32(attr.Value[0:4]) case unix.IFLA_GSO_MAX_SEGS: base.GSOMaxSegs = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_MAX_SIZE: + base.GSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GRO_MAX_SIZE: + base.GROMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_IPV4_MAX_SIZE: + base.GSOIPv4MaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GRO_IPV4_MAX_SIZE: + base.GROIPv4MaxSize = native.Uint32(attr.Value[0:4]) case unix.IFLA_VFINFO_LIST: data, err := nl.ParseRouteAttr(attr.Value) if err != nil { @@ -1957,6 +2239,13 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.NumRxQueues = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_GROUP: base.Group = native.Uint32(attr.Value[0:4]) + case unix.IFLA_PERM_ADDRESS: + for _, b := range attr.Value { + if b != 0 { + base.PermHWAddr = attr.Value[:] + break + } + } } } @@ -2069,21 +2358,24 @@ type LinkUpdate struct { // LinkSubscribe takes a chan down which notifications will be sent // when links change. Close the 'done' chan to stop subscription. func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) + return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) } // LinkSubscribeAt works like LinkSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribeAt(ns, netns.None(), ch, done, nil, false) + return linkSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // LinkSubscribeOptions contains a set of options to use with // LinkSubscribeWithOptions. 
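// Editor's sketch — reading the attributes newly populated by LinkDeserialize
// in the hunks above; not part of the vendored diff. Note that Promisc now
// carries the kernel's IFLA_PROMISCUITY reference count rather than the
// IFF_PROMISC flag bit, and names longer than 15 characters passed to
// LinkByName are resolved via IFLA_ALT_IFNAME.
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		panic(err)
	}
	a := link.Attrs()
	fmt.Println(a.AltNames)   // alternative names from IFLA_PROP_LIST
	fmt.Println(a.PermHWAddr) // permanent MAC from IFLA_PERM_ADDRESS, if reported
	fmt.Println(a.Promisc)    // promiscuity count, not a boolean flag
	fmt.Println(a.TSOMaxSize, a.GROMaxSize, a.GSOIPv4MaxSize, a.GROIPv4MaxSize)
}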
type LinkSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // LinkSubscribeWithOptions work like LinkSubscribe but enable to @@ -2094,14 +2386,27 @@ func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK) if err != nil { return err } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } if done != nil { go func() { <-done @@ -2182,6 +2487,16 @@ func (h *Handle) LinkSetGuard(link Link, mode bool) error { return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD) } +// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a bridge slave interface +func LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error { + return pkgHandle.LinkSetBRSlaveGroupFwdMask(link, mask) +} + +// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a bridge slave interface +func (h *Handle) LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error { + return h.setProtinfoAttrRawVal(link, nl.Uint16Attr(mask), nl.IFLA_BRPORT_GROUP_FWD_MASK) +} + func LinkSetFastLeave(link Link, mode bool) error { return pkgHandle.LinkSetFastLeave(link, mode) } @@ -2214,6 +2529,14 @@ func (h *Handle) LinkSetFlood(link Link, mode bool) error { return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD) } +func LinkSetIsolated(link Link, mode bool) error { + return pkgHandle.LinkSetIsolated(link, mode) +} + +func (h *Handle) LinkSetIsolated(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_ISOLATED) +} + func LinkSetBrProxyArp(link Link, mode bool) error { return pkgHandle.LinkSetBrProxyArp(link, mode) } @@ -2230,7 +2553,15 @@ func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP_WIFI) } -func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { +func LinkSetBrNeighSuppress(link Link, mode bool) error { + return pkgHandle.LinkSetBrNeighSuppress(link, mode) +} + +func (h *Handle) LinkSetBrNeighSuppress(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_NEIGH_SUPPRESS) +} + +func (h *Handle) setProtinfoAttrRawVal(link Link, val []byte, attr int) error { base := link.Attrs() h.ensureIndex(base) req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) @@ -2240,7 +2571,7 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { req.AddData(msg) br := 
nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) - br.AddRtAttr(attr, boolToByte(mode)) + br.AddRtAttr(attr, val) req.AddData(br) _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { @@ -2248,6 +2579,9 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { } return nil } +func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { + return h.setProtinfoAttrRawVal(link, boolToByte(mode), attr) +} // LinkSetTxQLen sets the transaction queue length for the link. // Equivalent to: `ip link set $link txqlen $qlen` @@ -2305,6 +2639,80 @@ func (h *Handle) LinkSetGroup(link Link, group int) error { return err } +func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { + if nk.peerLinkAttrs.HardwareAddr != nil || nk.HardwareAddr != nil { + return fmt.Errorf("netkit doesn't support setting Ethernet") + } + + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + // Kernel will return error if trying to change the mode of an existing netkit device + data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode))) + data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy))) + data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy))) + + if (flag & unix.NLM_F_EXCL) == 0 { + // Modifying peer link attributes will not take effect + return nil + } + + peer := data.AddRtAttr(nl.IFLA_NETKIT_PEER_INFO, nil) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + if nk.peerLinkAttrs.Flags&net.FlagUp != 0 { + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP + } + if nk.peerLinkAttrs.Index != 0 { + msg.Index = int32(nk.peerLinkAttrs.Index) + } + peer.AddChild(msg) + if nk.peerLinkAttrs.Name != "" { + peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(nk.peerLinkAttrs.Name)) + } + if nk.peerLinkAttrs.MTU > 0 { + peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(nk.peerLinkAttrs.MTU))) + } + if nk.peerLinkAttrs.GSOMaxSegs > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSegs)) + } + if nk.peerLinkAttrs.GSOMaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSize)) + } + if nk.peerLinkAttrs.GSOIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOIPv4MaxSize)) + } + if nk.peerLinkAttrs.GROIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GROIPv4MaxSize)) + } + if nk.peerLinkAttrs.Namespace != nil { + switch ns := nk.peerLinkAttrs.Namespace.(type) { + case NsPid: + peer.AddRtAttr(unix.IFLA_NET_NS_PID, nl.Uint32Attr(uint32(ns))) + case NsFd: + peer.AddRtAttr(unix.IFLA_NET_NS_FD, nl.Uint32Attr(uint32(ns))) + } + } + return nil +} + +func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) { + netkit := link.(*Netkit) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_NETKIT_PRIMARY: + isPrimary := datum.Value[0:1][0] + if isPrimary != 0 { + netkit.isPrimary = true + } + case nl.IFLA_NETKIT_MODE: + netkit.Mode = NetkitMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_POLICY: + netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_PEER_POLICY: + netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + } + } +} + func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { vlan := link.(*Vlan) for _, datum := range data { @@ -2539,11 +2947,30 @@ func parseIPVtapData(link Link, data []syscall.NetlinkRouteAttr) { } } +func addMacvtapAttrs(macvtap *Macvtap, linkInfo *nl.RtAttr) { + 
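// Editor's sketch — the macvlan broadcast-queue attribute handled by
// addMacvlanAttrs just below; not part of the vendored diff. Names are
// illustrative, and MACVLAN_MODE_BRIDGE is assumed from the package's
// existing mode constants. BCQueueLen is a request; the kernel reports the
// length actually in use via UsedBCQueueLen.
package main

import "github.com/vishvananda/netlink"

func main() {
	parent, err := netlink.LinkByName("eth0")
	if err != nil {
		panic(err)
	}
	mv := &netlink.Macvlan{
		LinkAttrs: netlink.LinkAttrs{
			Name:        "mv0",
			ParentIndex: parent.Attrs().Index,
		},
		Mode:       netlink.MACVLAN_MODE_BRIDGE,
		BCQueueLen: 1000,
	}
	if err := netlink.LinkAdd(mv); err != nil {
		panic(err)
	}
}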
addMacvlanAttrs(&macvtap.Macvlan, linkInfo) +} + func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvtap) parseMacvlanData(&macv.Macvlan, data) } +func addMacvlanAttrs(macvlan *Macvlan, linkInfo *nl.RtAttr) { + var data *nl.RtAttr + + if macvlan.Mode != MACVLAN_MODE_DEFAULT || macvlan.BCQueueLen > 0 { + data = linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + } + + if macvlan.Mode != MACVLAN_MODE_DEFAULT { + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macvlan.Mode])) + } + if macvlan.BCQueueLen > 0 { + data.AddRtAttr(nl.IFLA_MACVLAN_BC_QUEUE_LEN, nl.Uint32Attr(macvlan.BCQueueLen)) + } +} + func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvlan) for _, datum := range data { @@ -2571,6 +2998,10 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { for _, macDatum := range macs { macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6])) } + case nl.IFLA_MACVLAN_BC_QUEUE_LEN: + macv.BCQueueLen = native.Uint32(datum.Value[0:4]) + case nl.IFLA_MACVLAN_BC_QUEUE_LEN_USED: + macv.UsedBCQueueLen = native.Uint32(datum.Value[0:4]) } } } @@ -2599,10 +3030,13 @@ func linkFlags(rawFlags uint32) net.Flags { func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if geneve.InnerProtoInherit { + data.AddRtAttr(nl.IFLA_GENEVE_INNER_PROTO_INHERIT, []byte{}) + } + if geneve.FlowBased { - // In flow based mode, no other attributes need to be configured - linkInfo.AddRtAttr(nl.IFLA_GENEVE_COLLECT_METADATA, boolAttr(geneve.FlowBased)) - return + geneve.ID = 0 + data.AddRtAttr(nl.IFLA_GENEVE_COLLECT_METADATA, []byte{}) } if ip := geneve.Remote; ip != nil { @@ -2628,6 +3062,8 @@ func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { if geneve.Tos != 0 { data.AddRtAttr(nl.IFLA_GENEVE_TOS, nl.Uint8Attr(geneve.Tos)) } + + data.AddRtAttr(nl.IFLA_GENEVE_DF, nl.Uint8Attr(uint8(geneve.Df))) } func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2644,6 +3080,10 @@ func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) { geneve.Ttl = uint8(datum.Value[0]) case nl.IFLA_GENEVE_TOS: geneve.Tos = uint8(datum.Value[0]) + case nl.IFLA_GENEVE_COLLECT_METADATA: + geneve.FlowBased = true + case nl.IFLA_GENEVE_INNER_PROTO_INHERIT: + geneve.InnerProtoInherit = true } } } @@ -2653,7 +3093,7 @@ func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { if gretap.FlowBased { // In flow based mode, no other attributes need to be configured - data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased)) + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) return } @@ -2736,6 +3176,12 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if gre.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) + return + } + if ip := gre.Local; ip != nil { if ip.To4() != nil { ip = ip.To4() @@ -2806,6 +3252,8 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { gre.EncapSport = ntohs(datum.Value[0:2]) case nl.IFLA_GRE_ENCAP_DPORT: gre.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_COLLECT_METADATA: + gre.FlowBased = true } } } @@ -2846,14 +3294,14 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { } func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, 
nil) + if iptun.FlowBased { // In flow based mode, no other attributes need to be configured - linkInfo.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) + data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{}) return } - data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) - ip := iptun.Local.To4() if ip != nil { data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) @@ -2880,10 +3328,6 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun := link.(*Iptun) for _, datum := range data { - // NOTE: same with vxlan, ip tunnel may also has null datum.Value - if len(datum.Value) == 0 { - continue - } switch datum.Attr.Type { case nl.IFLA_IPTUN_LOCAL: iptun.Local = net.IP(datum.Value[0:4]) @@ -2914,6 +3358,12 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if ip6tnl.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{}) + return + } + if ip6tnl.Link != 0 { data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(ip6tnl.Link)) } @@ -2968,6 +3418,8 @@ func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) { ip6tnl.EncapSport = ntohs(datum.Value[0:2]) case nl.IFLA_IPTUN_ENCAP_DPORT: ip6tnl.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_COLLECT_METADATA: + ip6tnl.FlowBased = true } } } @@ -3115,6 +3567,12 @@ func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) { if bridge.VlanFiltering != nil { data.AddRtAttr(nl.IFLA_BR_VLAN_FILTERING, boolToByte(*bridge.VlanFiltering)) } + if bridge.VlanDefaultPVID != nil { + data.AddRtAttr(nl.IFLA_BR_VLAN_DEFAULT_PVID, nl.Uint16Attr(*bridge.VlanDefaultPVID)) + } + if bridge.GroupFwdMask != nil { + data.AddRtAttr(nl.IFLA_BR_GROUP_FWD_MASK, nl.Uint16Attr(*bridge.GroupFwdMask)) + } } func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { @@ -3133,6 +3591,12 @@ func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BR_VLAN_FILTERING: vlanFiltering := datum.Value[0] == 1 br.VlanFiltering = &vlanFiltering + case nl.IFLA_BR_VLAN_DEFAULT_PVID: + vlanDefaultPVID := native.Uint16(datum.Value[0:2]) + br.VlanDefaultPVID = &vlanDefaultPVID + case nl.IFLA_BR_GROUP_FWD_MASK: + mask := native.Uint16(datum.Value[0:2]) + br.GroupFwdMask = &mask } } } @@ -3174,12 +3638,17 @@ func parseVfInfoList(data []syscall.NetlinkRouteAttr) ([]VfInfo, error) { if err != nil { return nil, err } - vfs = append(vfs, parseVfInfo(vfAttrs, i)) + + vf, err := parseVfInfo(vfAttrs, i) + if err != nil { + return nil, err + } + vfs = append(vfs, vf) } return vfs, nil } -func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { +func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) (VfInfo, error) { vf := VfInfo{ID: id} for _, element := range data { switch element.Attr.Type { @@ -3190,6 +3659,12 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { vl := nl.DeserializeVfVlan(element.Value[:]) vf.Vlan = int(vl.Vlan) vf.Qos = int(vl.Qos) + case nl.IFLA_VF_VLAN_LIST: + vfVlanInfoList, err := nl.DeserializeVfVlanList(element.Value[:]) + if err != nil { + return vf, err + } + vf.VlanProto = int(vfVlanInfoList[0].VlanProto) case nl.IFLA_VF_TX_RATE: txr := nl.DeserializeVfTxRate(element.Value[:]) vf.TxRate = int(txr.Rate) @@ -3223,7 +3698,7 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { vf.Trust = 
result.Setting
 		}
 	}
-	return vf
+	return vf, nil
 }
 
 func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) {
@@ -3246,8 +3721,7 @@ func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) {
 	}
 }
 
-// LinkSetBondSlave add slave to bond link via ioctl interface.
-func LinkSetBondSlave(link Link, master *Bond) error {
+func ioctlBondSlave(cmd uintptr, link Link, master *Bond) error {
 	fd, err := getSocketUDP()
 	if err != nil {
 		return err
 	}
@@ -3255,10 +3729,38 @@
 	defer syscall.Close(fd)
 
 	ifreq := newIocltSlaveReq(link.Attrs().Name, master.Attrs().Name)
-
-	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), unix.SIOCBONDENSLAVE, uintptr(unsafe.Pointer(ifreq)))
+	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), cmd, uintptr(unsafe.Pointer(ifreq)))
 	if errno != 0 {
-		return fmt.Errorf("Failed to enslave %q to %q, errno=%v", link.Attrs().Name, master.Attrs().Name, errno)
+		return fmt.Errorf("errno=%v", errno)
+	}
+	return nil
+}
+
+// LinkSetBondSlaveActive sets the specified slave to ACTIVE in an `active-backup` bond link via the ioctl interface.
+//
+// Multiple calls keep the status unchanged (as shown in the unit test).
+func LinkSetBondSlaveActive(link Link, master *Bond) error {
+	err := ioctlBondSlave(unix.SIOCBONDCHANGEACTIVE, link, master)
+	if err != nil {
+		return fmt.Errorf("Failed to set slave %q active in %q, %v", link.Attrs().Name, master.Attrs().Name, err)
+	}
+	return nil
+}
+
+// LinkSetBondSlave adds a slave to a bond link via the ioctl interface.
+func LinkSetBondSlave(link Link, master *Bond) error {
+	err := ioctlBondSlave(unix.SIOCBONDENSLAVE, link, master)
+	if err != nil {
+		return fmt.Errorf("Failed to enslave %q to %q, %v", link.Attrs().Name, master.Attrs().Name, err)
+	}
+	return nil
+}
+
+// LinkDelBondSlave removes the specified slave from a bond link via the ioctl interface.
+func LinkDelBondSlave(link Link, master *Bond) error {
+	err := ioctlBondSlave(unix.SIOCBONDRELEASE, link, master)
+	if err != nil {
+		return fmt.Errorf("Failed to del slave %q from %q, %v", link.Attrs().Name, master.Attrs().Name, err)
 	}
 	return nil
 }
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
index 4c1e76635..2d93044a6 100644
--- a/vendor/github.com/vishvananda/netlink/neigh_linux.go
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -339,13 +339,13 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
 // NeighSubscribe takes a chan down which notifications will be sent
 // when neighbors are added or deleted. Close the 'done' chan to stop subscription.
 func NeighSubscribe(ch chan<- NeighUpdate, done <-chan struct{}) error {
-	return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false)
+	return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false)
 }
 
 // NeighSubscribeAt works like NeighSubscribe plus it allows the caller
 // to choose the network namespace in which to subscribe (ns).
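// Editor's sketch — the bond-slave ioctl helpers refactored above; not part
// of the vendored diff. Device names are illustrative, and the master must
// already be configured in active-backup mode for SIOCBONDCHANGEACTIVE to
// make sense.
package main

import "github.com/vishvananda/netlink"

func main() {
	slave, err := netlink.LinkByName("eth1")
	if err != nil {
		panic(err)
	}
	master, err := netlink.LinkByName("bond0")
	if err != nil {
		panic(err)
	}
	bond := master.(*netlink.Bond)

	if err := netlink.LinkSetBondSlave(slave, bond); err != nil { // SIOCBONDENSLAVE
		panic(err)
	}
	if err := netlink.LinkSetBondSlaveActive(slave, bond); err != nil { // SIOCBONDCHANGEACTIVE
		panic(err)
	}
	if err := netlink.LinkDelBondSlave(slave, bond); err != nil { // SIOCBONDRELEASE
		panic(err)
	}
}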
func NeighSubscribeAt(ns netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}) error { - return neighSubscribeAt(ns, netns.None(), ch, done, nil, false) + return neighSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // NeighSubscribeOptions contains a set of options to use with @@ -354,6 +354,11 @@ type NeighSubscribeOptions struct { Namespace *netns.NsHandle ErrorCallback func(error) ListExisting bool + + // max size is based on value of /proc/sys/net/core/rmem_max + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // NeighSubscribeWithOptions work like NeighSubscribe but enable to @@ -364,16 +369,17 @@ func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, opti none := netns.None() options.Namespace = &none } - return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH) makeRequest := func(family int) error { - req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, - unix.NLM_F_DUMP) - infmsg := nl.NewIfInfomsg(family) - req.AddData(infmsg) + req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) + ndmsg := &Ndmsg{Family: uint8(family)} + req.AddData(ndmsg) if err := s.Send(req); err != nil { return err } @@ -382,6 +388,17 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < if err != nil { return err } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } if done != nil { go func() { <-done @@ -427,12 +444,12 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < continue } if m.Header.Type == unix.NLMSG_ERROR { - error := int32(native.Uint32(m.Data[0:4])) - if error == 0 { + nError := int32(native.Uint32(m.Data[0:4])) + if nError == 0 { continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(syscall.Errno(-nError)) } return } diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index 98d2c0dbf..da12c42a5 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -52,6 +52,10 @@ func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { return ErrNotImplemented } +func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + return ErrNotImplemented +} + func LinkSetVfTxRate(link Link, vf, rate int) error { return ErrNotImplemented } @@ -124,6 +128,22 @@ func LinkSetTxQLen(link Link, qlen int) error { return ErrNotImplemented } +func LinkSetGSOMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGROMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func 
LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + func LinkAdd(link Link) error { return ErrNotImplemented } @@ -184,6 +204,10 @@ func RouteAppend(route *Route) error { return ErrNotImplemented } +func RouteChange(route *Route) error { + return ErrNotImplemented +} + func RouteDel(route *Route) error { return ErrNotImplemented } @@ -216,6 +240,10 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { return nil, ErrNotImplemented } +func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) { + return nil, ErrNotImplemented +} + func XfrmStateAdd(policy *XfrmState) error { return ErrNotImplemented } @@ -255,3 +283,7 @@ func NeighDeserialize(m []byte) (*Neigh, error) { func SocketGet(local, remote net.Addr) (*Socket, error) { return nil, ErrNotImplemented } + +func SocketDestroy(local, remote net.Addr) (*Socket, error) { + return nil, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go index 183601803..6989d1edc 100644 --- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -15,6 +15,38 @@ var L4ProtoMap = map[uint8]string{ 17: "udp", } +// From https://git.netfilter.org/libnetfilter_conntrack/tree/include/libnetfilter_conntrack/libnetfilter_conntrack_tcp.h +// enum tcp_state { +// TCP_CONNTRACK_NONE, +// TCP_CONNTRACK_SYN_SENT, +// TCP_CONNTRACK_SYN_RECV, +// TCP_CONNTRACK_ESTABLISHED, +// TCP_CONNTRACK_FIN_WAIT, +// TCP_CONNTRACK_CLOSE_WAIT, +// TCP_CONNTRACK_LAST_ACK, +// TCP_CONNTRACK_TIME_WAIT, +// TCP_CONNTRACK_CLOSE, +// TCP_CONNTRACK_LISTEN, /* obsolete */ +// #define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN +// TCP_CONNTRACK_MAX, +// TCP_CONNTRACK_IGNORE +// }; +const ( + TCP_CONNTRACK_NONE = 0 + TCP_CONNTRACK_SYN_SENT = 1 + TCP_CONNTRACK_SYN_RECV = 2 + TCP_CONNTRACK_ESTABLISHED = 3 + TCP_CONNTRACK_FIN_WAIT = 4 + TCP_CONNTRACK_CLOSE_WAIT = 5 + TCP_CONNTRACK_LAST_ACK = 6 + TCP_CONNTRACK_TIME_WAIT = 7 + TCP_CONNTRACK_CLOSE = 8 + TCP_CONNTRACK_LISTEN = 9 + TCP_CONNTRACK_SYN_SENT2 = 9 + TCP_CONNTRACK_MAX = 10 + TCP_CONNTRACK_IGNORE = 11 +) + // All the following constants are coming from: // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -31,6 +63,7 @@ var L4ProtoMap = map[uint8]string{ // IPCTNL_MSG_MAX // }; const ( + IPCTNL_MSG_CT_NEW = 0 IPCTNL_MSG_CT_GET = 1 IPCTNL_MSG_CT_DELETE = 2 ) @@ -88,7 +121,10 @@ const ( CTA_COUNTERS_REPLY = 10 CTA_USE = 11 CTA_ID = 12 + CTA_ZONE = 18 CTA_TIMESTAMP = 20 + CTA_LABELS = 22 + CTA_LABELS_MASK = 23 ) // enum ctattr_tuple { @@ -149,7 +185,10 @@ const ( // }; // #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) const ( + CTA_PROTOINFO_UNSPEC = 0 CTA_PROTOINFO_TCP = 1 + CTA_PROTOINFO_DCCP = 2 + CTA_PROTOINFO_SCTP = 3 ) // enum ctattr_protoinfo_tcp { diff --git a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go index 2995da492..956367b29 100644 --- a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go @@ -9,39 +9,56 @@ const ( ) const ( - DEVLINK_CMD_GET = 1 - DEVLINK_CMD_PORT_GET = 5 - DEVLINK_CMD_PORT_SET = 6 - DEVLINK_CMD_PORT_NEW = 7 - DEVLINK_CMD_PORT_DEL = 8 - DEVLINK_CMD_ESWITCH_GET = 29 - DEVLINK_CMD_ESWITCH_SET = 30 - 
DEVLINK_CMD_INFO_GET = 51 + DEVLINK_CMD_GET = 1 + DEVLINK_CMD_PORT_GET = 5 + DEVLINK_CMD_PORT_SET = 6 + DEVLINK_CMD_PORT_NEW = 7 + DEVLINK_CMD_PORT_DEL = 8 + DEVLINK_CMD_ESWITCH_GET = 29 + DEVLINK_CMD_ESWITCH_SET = 30 + DEVLINK_CMD_RESOURCE_DUMP = 36 + DEVLINK_CMD_PARAM_GET = 38 + DEVLINK_CMD_PARAM_SET = 39 + DEVLINK_CMD_INFO_GET = 51 ) const ( - DEVLINK_ATTR_BUS_NAME = 1 - DEVLINK_ATTR_DEV_NAME = 2 - DEVLINK_ATTR_PORT_INDEX = 3 - DEVLINK_ATTR_PORT_TYPE = 4 - DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6 - DEVLINK_ATTR_PORT_NETDEV_NAME = 7 - DEVLINK_ATTR_PORT_IBDEV_NAME = 8 - DEVLINK_ATTR_ESWITCH_MODE = 25 - DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 - DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 - DEVLINK_ATTR_PORT_FLAVOUR = 77 - DEVLINK_ATTR_INFO_DRIVER_NAME = 98 - DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99 - DEVLINK_ATTR_INFO_VERSION_FIXED = 100 - DEVLINK_ATTR_INFO_VERSION_RUNNING = 101 - DEVLINK_ATTR_INFO_VERSION_STORED = 102 - DEVLINK_ATTR_INFO_VERSION_NAME = 103 - DEVLINK_ATTR_INFO_VERSION_VALUE = 104 - DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127 - DEVLINK_ATTR_PORT_FUNCTION = 145 - DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150 - DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164 + DEVLINK_ATTR_BUS_NAME = 1 + DEVLINK_ATTR_DEV_NAME = 2 + DEVLINK_ATTR_PORT_INDEX = 3 + DEVLINK_ATTR_PORT_TYPE = 4 + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6 + DEVLINK_ATTR_PORT_NETDEV_NAME = 7 + DEVLINK_ATTR_PORT_IBDEV_NAME = 8 + DEVLINK_ATTR_ESWITCH_MODE = 25 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 + DEVLINK_ATTR_RESOURCE_LIST = 63 /* nested */ + DEVLINK_ATTR_RESOURCE = 64 /* nested */ + DEVLINK_ATTR_RESOURCE_NAME = 65 /* string */ + DEVLINK_ATTR_RESOURCE_ID = 66 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE = 67 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69 /* u8 */ + DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72 /* u64 */ + DEVLINK_ATTR_RESOURCE_UNIT = 73 /* u8 */ + DEVLINK_ATTR_RESOURCE_OCC = 74 /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75 /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76 /* u64 */ + DEVLINK_ATTR_PORT_FLAVOUR = 77 + DEVLINK_ATTR_INFO_DRIVER_NAME = 98 + DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99 + DEVLINK_ATTR_INFO_VERSION_FIXED = 100 + DEVLINK_ATTR_INFO_VERSION_RUNNING = 101 + DEVLINK_ATTR_INFO_VERSION_STORED = 102 + DEVLINK_ATTR_INFO_VERSION_NAME = 103 + DEVLINK_ATTR_INFO_VERSION_VALUE = 104 + DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127 + DEVLINK_ATTR_PORT_FUNCTION = 145 + DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150 + DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164 ) const ( @@ -94,3 +111,32 @@ const ( DEVLINK_PORT_FN_OPSTATE_DETACHED = 0 DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1 ) + +const ( + DEVLINK_RESOURCE_UNIT_ENTRY uint8 = 0 +) + +const ( + DEVLINK_ATTR_PARAM = iota + 80 /* nested */ + DEVLINK_ATTR_PARAM_NAME /* string */ + DEVLINK_ATTR_PARAM_GENERIC /* flag */ + DEVLINK_ATTR_PARAM_TYPE /* u8 */ + DEVLINK_ATTR_PARAM_VALUES_LIST /* nested */ + DEVLINK_ATTR_PARAM_VALUE /* nested */ + DEVLINK_ATTR_PARAM_VALUE_DATA /* dynamic */ + DEVLINK_ATTR_PARAM_VALUE_CMODE /* u8 */ +) + +const ( + DEVLINK_PARAM_TYPE_U8 = 1 + DEVLINK_PARAM_TYPE_U16 = 2 + DEVLINK_PARAM_TYPE_U32 = 3 + DEVLINK_PARAM_TYPE_STRING = 5 + DEVLINK_PARAM_TYPE_BOOL = 6 +) + +const ( + DEVLINK_PARAM_CMODE_RUNTIME = iota + DEVLINK_PARAM_CMODE_DRIVERINIT + DEVLINK_PARAM_CMODE_PERMANENT +) diff --git a/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go new file 
mode 100644 index 000000000..d5dd69e0c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go @@ -0,0 +1,21 @@ +package nl + +// id's of route attribute from https://elixir.bootlin.com/linux/v5.17.3/source/include/uapi/linux/lwtunnel.h#L38 +// the value's size are specified in https://elixir.bootlin.com/linux/v5.17.3/source/net/ipv4/ip_tunnel_core.c#L928 + +const ( + LWTUNNEL_IP6_UNSPEC = iota + LWTUNNEL_IP6_ID + LWTUNNEL_IP6_DST + LWTUNNEL_IP6_SRC + LWTUNNEL_IP6_HOPLIMIT + LWTUNNEL_IP6_TC + LWTUNNEL_IP6_FLAGS + LWTUNNEL_IP6_PAD // not implemented + LWTUNNEL_IP6_OPTS // not implemented + __LWTUNNEL_IP6_MAX +) + + + + diff --git a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go index a60b4b09d..89dd009df 100644 --- a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go @@ -88,6 +88,11 @@ const ( SET_ATTR_CREATE_MAX ) +const ( + IPSET_ATTR_IPADDR_IPV4 = 1 + IPSET_ATTR_IPADDR_IPV6 = 2 +) + /* ADT specific attributes */ const ( IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + iota + 1 diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index e10edbc09..0b5be470c 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -3,6 +3,7 @@ package nl import ( "bytes" "encoding/binary" + "fmt" "unsafe" ) @@ -30,6 +31,16 @@ const ( IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL ) +const ( + IFLA_NETKIT_UNSPEC = iota + IFLA_NETKIT_PEER_INFO + IFLA_NETKIT_PRIMARY + IFLA_NETKIT_POLICY + IFLA_NETKIT_PEER_POLICY + IFLA_NETKIT_MODE + IFLA_NETKIT_MAX = IFLA_NETKIT_MODE +) + const ( VETH_INFO_UNSPEC = iota VETH_INFO_PEER @@ -85,7 +96,37 @@ const ( IFLA_BRPORT_PROXYARP IFLA_BRPORT_LEARNING_SYNC IFLA_BRPORT_PROXYARP_WIFI - IFLA_BRPORT_MAX = IFLA_BRPORT_PROXYARP_WIFI + IFLA_BRPORT_ROOT_ID + IFLA_BRPORT_BRIDGE_ID + IFLA_BRPORT_DESIGNATED_PORT + IFLA_BRPORT_DESIGNATED_COST + IFLA_BRPORT_ID + IFLA_BRPORT_NO + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK + IFLA_BRPORT_CONFIG_PENDING + IFLA_BRPORT_MESSAGE_AGE_TIMER + IFLA_BRPORT_FORWARD_DELAY_TIMER + IFLA_BRPORT_HOLD_TIMER + IFLA_BRPORT_FLUSH + IFLA_BRPORT_MULTICAST_ROUTER + IFLA_BRPORT_PAD + IFLA_BRPORT_MCAST_FLOOD + IFLA_BRPORT_MCAST_TO_UCAST + IFLA_BRPORT_VLAN_TUNNEL + IFLA_BRPORT_BCAST_FLOOD + IFLA_BRPORT_GROUP_FWD_MASK + IFLA_BRPORT_NEIGH_SUPPRESS + IFLA_BRPORT_ISOLATED + IFLA_BRPORT_BACKUP_PORT + IFLA_BRPORT_MRP_RING_OPEN + IFLA_BRPORT_MRP_IN_OPEN + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT + IFLA_BRPORT_LOCKED + IFLA_BRPORT_MAB + IFLA_BRPORT_MCAST_N_GROUPS + IFLA_BRPORT_MCAST_MAX_GROUPS + IFLA_BRPORT_MAX = IFLA_BRPORT_MCAST_MAX_GROUPS ) const ( @@ -103,7 +144,9 @@ const ( IFLA_MACVLAN_MACADDR IFLA_MACVLAN_MACADDR_DATA IFLA_MACVLAN_MACADDR_COUNT - IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS + IFLA_MACVLAN_BC_QUEUE_LEN + IFLA_MACVLAN_BC_QUEUE_LEN_USED + IFLA_MACVLAN_MAX = IFLA_MACVLAN_BC_QUEUE_LEN_USED ) const ( @@ -186,7 +229,10 @@ const ( IFLA_GENEVE_UDP_ZERO_CSUM6_TX IFLA_GENEVE_UDP_ZERO_CSUM6_RX IFLA_GENEVE_LABEL - IFLA_GENEVE_MAX = IFLA_GENEVE_LABEL + IFLA_GENEVE_TTL_INHERIT + IFLA_GENEVE_DF + IFLA_GENEVE_INNER_PROTO_INHERIT + IFLA_GENEVE_MAX = IFLA_GENEVE_INNER_PROTO_INHERIT ) const ( @@ -244,7 +290,15 @@ const ( IFLA_VF_TRUST /* Trust state of VF */ IFLA_VF_IB_NODE_GUID /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID /* VF Infiniband port GUID */ - IFLA_VF_MAX = 
IFLA_VF_IB_PORT_GUID + IFLA_VF_VLAN_LIST /* nested list of vlans, option for QinQ */ + + IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID +) + +const ( + IFLA_VF_VLAN_INFO_UNSPEC = iota + IFLA_VF_VLAN_INFO /* VLAN ID, QoS and VLAN protocol */ + __IFLA_VF_VLAN_INFO_MAX ) const ( @@ -269,6 +323,7 @@ const ( const ( SizeofVfMac = 0x24 SizeofVfVlan = 0x0c + SizeofVfVlanInfo = 0x10 SizeofVfTxRate = 0x08 SizeofVfRate = 0x0c SizeofVfSpoofchk = 0x08 @@ -324,6 +379,49 @@ func (msg *VfVlan) Serialize() []byte { return (*(*[SizeofVfVlan]byte)(unsafe.Pointer(msg)))[:] } +func DeserializeVfVlanList(b []byte) ([]*VfVlanInfo, error) { + var vfVlanInfoList []*VfVlanInfo + attrs, err := ParseRouteAttr(b) + if err != nil { + return nil, err + } + + for _, element := range attrs { + if element.Attr.Type == IFLA_VF_VLAN_INFO { + vfVlanInfoList = append(vfVlanInfoList, DeserializeVfVlanInfo(element.Value)) + } + } + + if len(vfVlanInfoList) == 0 { + return nil, fmt.Errorf("VF vlan list is defined but no vf vlan info elements were found") + } + + return vfVlanInfoList, nil +} + +// struct ifla_vf_vlan_info { +// __u32 vf; +// __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ +// __u32 qos; +// __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */ +// }; + +type VfVlanInfo struct { + VfVlan + VlanProto uint16 +} + +func DeserializeVfVlanInfo(b []byte) *VfVlanInfo { + return &VfVlanInfo{ + *(*VfVlan)(unsafe.Pointer(&b[0:SizeofVfVlan][0])), + binary.BigEndian.Uint16(b[SizeofVfVlan:SizeofVfVlanInfo]), + } +} + +func (msg *VfVlanInfo) Serialize() []byte { + return (*(*[SizeofVfVlanInfo]byte)(unsafe.Pointer(msg)))[:] +} + // struct ifla_vf_tx_rate { // __u32 vf; // __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 600b942b1..6cecc4517 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "fmt" "net" + "os" "runtime" "sync" "sync/atomic" @@ -330,6 +331,19 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { return msg } +type Uint32Bitfield struct { + Value uint32 + Selector uint32 +} + +func (a *Uint32Bitfield) Serialize() []byte { + return (*(*[SizeofUint32Bitfield]byte)(unsafe.Pointer(a)))[:] +} + +func DeserializeUint32Bitfield(data []byte) *Uint32Bitfield { + return (*Uint32Bitfield)(unsafe.Pointer(&data[0:SizeofUint32Bitfield][0])) +} + type Uint32Attribute struct { Type uint16 Value uint32 @@ -475,10 +489,30 @@ func (req *NetlinkRequest) AddRawData(data []byte) { req.RawData = append(req.RawData, data...) } -// Execute the request against a the given sockType. +// Execute the request against the given sockType. // Returns a list of netlink messages in serialized format, optionally filtered // by resType. func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) { + var res [][]byte + err := req.ExecuteIter(sockType, resType, func(msg []byte) bool { + res = append(res, msg) + return true + }) + if err != nil { + return nil, err + } + return res, nil +} + +// ExecuteIter executes the request against the given sockType. +// Calls the provided callback func once for each netlink message. +// If the callback returns false, it is not called again, but +// the remaining messages are consumed/discarded. 
+// +// Thread safety: ExecuteIter holds a lock on the socket until +// it finishes iteration so the callback must not call back into +// the netlink API. +func (req *NetlinkRequest) ExecuteIter(sockType int, resType uint16, f func(msg []byte) bool) error { var ( s *NetlinkSocket err error @@ -495,18 +529,18 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro if s == nil { s, err = getNetlinkSocket(sockType) if err != nil { - return nil, err + return err } if err := s.SetSendTimeout(&SocketTimeoutTv); err != nil { - return nil, err + return err } if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil { - return nil, err + return err } if EnableErrorMessageReporting { if err := s.SetExtAck(true); err != nil { - return nil, err + return err } } @@ -517,36 +551,44 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro } if err := s.Send(req); err != nil { - return nil, err + return err } pid, err := s.GetPid() if err != nil { - return nil, err + return err } - var res [][]byte - done: for { msgs, from, err := s.Receive() if err != nil { - return nil, err + return err } if from.Pid != PidKernel { - return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel) + return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel) } for _, m := range msgs { if m.Header.Seq != req.Seq { if sharedSocket { continue } - return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq) + return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq) } if m.Header.Pid != pid { continue } + + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { + return syscall.Errno(unix.EINTR) + } + if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { + // NLMSG_DONE might have no payload, if so assume no error. + if m.Header.Type == unix.NLMSG_DONE && len(m.Data) == 0 { + break done + } + native := NativeEndian() errno := int32(native.Uint32(m.Data[0:4])) if errno == 0 { @@ -556,7 +598,7 @@ done: err = syscall.Errno(-errno) unreadData := m.Data[4:] - if m.Header.Flags|unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr { + if m.Header.Flags&unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr { // Skip the echoed request message. echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0])) unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):] @@ -568,8 +610,7 @@ done: switch attr.Type { case NLMSGERR_ATTR_MSG: - err = fmt.Errorf("%w: %s", err, string(attrData)) - + err = fmt.Errorf("%w: %s", err, unix.ByteSliceToString(attrData)) default: // TODO: handle other NLMSGERR_ATTR types } @@ -578,18 +619,26 @@ done: } } - return nil, err + return err } if resType != 0 && m.Header.Type != resType { continue } - res = append(res, m.Data) + if cont := f(m.Data); !cont { + // Drain the rest of the messages from the kernel but don't + // pass them to the iterator func. 
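// Editor's sketch — driving the new ExecuteIter from the nl package to
// stream a large RTM_GETLINK dump without buffering every message, stopping
// early once enough has been seen; not part of the vendored diff. Remaining
// messages are drained after the callback returns false, as documented above.
package main

import (
	"fmt"

	"github.com/vishvananda/netlink/nl"
	"golang.org/x/sys/unix"
)

func main() {
	req := nl.NewNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP)
	req.AddData(nl.NewIfInfomsg(unix.AF_UNSPEC))

	count := 0
	err := req.ExecuteIter(unix.NETLINK_ROUTE, unix.RTM_NEWLINK, func(msg []byte) bool {
		count++
		return count < 16 // returning false stops delivery; the dump is still consumed
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("links seen:", count)
}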
+ f = dummyMsgIterFunc + } if m.Header.Flags&unix.NLM_F_MULTI == 0 { break done } } } - return res, nil + return nil +} + +func dummyMsgIterFunc(msg []byte) bool { + return true } // Create a new netlink request from proto and flags @@ -607,8 +656,9 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { } type NetlinkSocket struct { - fd int32 - lsa unix.SockaddrNetlink + fd int32 + file *os.File + lsa unix.SockaddrNetlink sync.Mutex } @@ -617,8 +667,13 @@ func getNetlinkSocket(protocol int) (*NetlinkSocket, error) { if err != nil { return nil, err } + err = unix.SetNonblock(fd, true) + if err != nil { + return nil, err + } s := &NetlinkSocket{ - fd: int32(fd), + fd: int32(fd), + file: os.NewFile(uintptr(fd), "netlink"), } s.lsa.Family = unix.AF_NETLINK if err := unix.Bind(fd, &s.lsa); err != nil { @@ -649,12 +704,14 @@ func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSock // In case of success, the caller is expected to execute the returned function // at the end of the code that needs to be executed in the network namespace. // Example: -// func jobAt(...) error { -// d, err := executeInNetns(...) -// if err != nil { return err} -// defer d() -// < code which needs to be executed in specific netns> -// } +// +// func jobAt(...) error { +// d, err := executeInNetns(...) +// if err != nil { return err} +// defer d() +// < code which needs to be executed in specific netns> +// } +// // TODO: this function probably belongs to the netns pkg. func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { var ( @@ -703,8 +760,13 @@ func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { if err != nil { return nil, err } + err = unix.SetNonblock(fd, true) + if err != nil { + return nil, err + } s := &NetlinkSocket{ - fd: int32(fd), + fd: int32(fd), + file: os.NewFile(uintptr(fd), "netlink"), } s.lsa.Family = unix.AF_NETLINK @@ -733,33 +795,36 @@ func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*Ne } func (s *NetlinkSocket) Close() { - fd := int(atomic.SwapInt32(&s.fd, -1)) - unix.Close(fd) + s.file.Close() } func (s *NetlinkSocket) GetFd() int { - return int(atomic.LoadInt32(&s.fd)) + return int(s.fd) } func (s *NetlinkSocket) Send(request *NetlinkRequest) error { - fd := int(atomic.LoadInt32(&s.fd)) - if fd < 0 { - return fmt.Errorf("Send called on a closed socket") - } - if err := unix.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { - return err - } - return nil + return unix.Sendto(int(s.fd), request.Serialize(), 0, &s.lsa) } func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) { - fd := int(atomic.LoadInt32(&s.fd)) - if fd < 0 { - return nil, nil, fmt.Errorf("Receive called on a closed socket") + rawConn, err := s.file.SyscallConn() + if err != nil { + return nil, nil, err + } + var ( + fromAddr *unix.SockaddrNetlink + rb [RECEIVE_BUFFER_SIZE]byte + nr int + from unix.Sockaddr + innerErr error + ) + err = rawConn.Read(func(fd uintptr) (done bool) { + nr, from, innerErr = unix.Recvfrom(int(fd), rb[:], 0) + return innerErr != unix.EWOULDBLOCK + }) + if innerErr != nil { + err = innerErr } - var fromAddr *unix.SockaddrNetlink - var rb [RECEIVE_BUFFER_SIZE]byte - nr, from, err := unix.Recvfrom(fd, rb[:], 0) if err != nil { return nil, nil, err } @@ -770,8 +835,9 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli if nr < unix.NLMSG_HDRLEN { return nil, nil, fmt.Errorf("Got short response from netlink") } - rb2 := make([]byte, nr) - copy(rb2,
rb[:nr]) + msgLen := nlmAlignOf(nr) + rb2 := make([]byte, msgLen) + copy(rb2, rb[:msgLen]) nl, err := syscall.ParseNetlinkMessage(rb2) if err != nil { return nil, nil, err } @@ -793,6 +859,15 @@ func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) } +// SetReceiveBufferSize sets the receive buffer size of the socket +func (s *NetlinkSocket) SetReceiveBufferSize(size int, force bool) error { + opt := unix.SO_RCVBUF + if force { + opt = unix.SO_RCVBUFFORCE + } + return unix.SetsockoptInt(int(s.fd), unix.SOL_SOCKET, opt, size) +} + // SetExtAck requests error messages to be reported on the socket func (s *NetlinkSocket) SetExtAck(enable bool) error { var enableN int @@ -804,8 +879,7 @@ } func (s *NetlinkSocket) GetPid() (uint32, error) { - fd := int(atomic.LoadInt32(&s.fd)) - lsa, err := unix.Getsockname(fd) + lsa, err := unix.Getsockname(int(s.fd)) if err != nil { return 0, err } @@ -849,6 +923,12 @@ func Uint16Attr(v uint16) []byte { return bytes } +func BEUint16Attr(v uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, v) + return bytes +} + func Uint32Attr(v uint32) []byte { native := NativeEndian() bytes := make([]byte, 4) @@ -856,6 +936,12 @@ return bytes } +func BEUint32Attr(v uint32) []byte { + bytes := make([]byte, 4) + binary.BigEndian.PutUint32(bytes, v) + return bytes +} + func Uint64Attr(v uint64) []byte { native := NativeEndian() bytes := make([]byte, 8) @@ -863,6 +949,12 @@ return bytes } +func BEUint64Attr(v uint64) []byte { + bytes := make([]byte, 8) + binary.BigEndian.PutUint64(bytes, v) + return bytes +} + func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { var attrs []syscall.NetlinkRouteAttr for len(b) >= unix.SizeofRtAttr { @@ -877,6 +969,22 @@ return attrs, nil } +// ParseRouteAttrAsMap parses the provided buffer of raw RtAttrs and returns a map of parsed +// attributes indexed by attribute type, or an error if one occurred.
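+//
+// A lookup sketch, assuming m holds an RTM_NEWLINK message payload:
+//
+//	attrs, err := ParseRouteAttrAsMap(m[unix.SizeofIfInfomsg:])
+//	if err != nil {
+//		return err
+//	}
+//	if a, ok := attrs[unix.IFLA_MTU]; ok {
+//		mtu := NativeEndian().Uint32(a.Value)
+//		_ = mtu
+//	}
+//
+// If an attribute type occurs more than once, the last occurrence wins.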
+func ParseRouteAttrAsMap(b []byte) (map[uint16]syscall.NetlinkRouteAttr, error) { + attrMap := make(map[uint16]syscall.NetlinkRouteAttr) + + attrs, err := ParseRouteAttr(b) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + attrMap[attr.Attr.Type] = attr + } + return attrMap, nil +} + func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) { a := (*unix.RtAttr)(unsafe.Pointer(&b[0])) if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) { diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go index 03c1900ff..c26f3bf91 100644 --- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -48,7 +48,9 @@ type RtNexthop struct { } func DeserializeRtNexthop(b []byte) *RtNexthop { - return (*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0])) + return &RtNexthop{ + RtNexthop: *((*unix.RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0]))), + } } func (msg *RtNexthop) Len() int { diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go index 150017726..8172b8471 100644 --- a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go @@ -12,6 +12,7 @@ const ( SEG6_LOCAL_NH6 SEG6_LOCAL_IIF SEG6_LOCAL_OIF + SEG6_LOCAL_BPF __SEG6_LOCAL_MAX ) const ( @@ -34,6 +35,7 @@ const ( SEG6_LOCAL_ACTION_END_S // 12 SEG6_LOCAL_ACTION_END_AS // 13 SEG6_LOCAL_ACTION_END_AM // 14 + SEG6_LOCAL_ACTION_END_BPF // 15 __SEG6_LOCAL_ACTION_MAX ) const ( @@ -71,6 +73,8 @@ func SEG6LocalActionString(action int) string { return "End.AS" case SEG6_LOCAL_ACTION_END_AM: return "End.AM" + case SEG6_LOCAL_ACTION_END_BPF: + return "End.BPF" } return "unknown" } diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go index bdf6ba639..b5ba039ac 100644 --- a/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -46,6 +46,7 @@ const ( // socket diags related const ( SOCK_DIAG_BY_FAMILY = 20 /* linux.sock_diag.h */ + SOCK_DESTROY = 21 TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/ ) diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index eb05ff1cd..0720729a9 100644 --- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -1,8 +1,13 @@ package nl import ( + "bytes" "encoding/binary" + "fmt" + "net" "unsafe" + + "golang.org/x/sys/unix" ) // LinkLayer @@ -42,7 +47,14 @@ const ( TCA_FCNT TCA_STATS2 TCA_STAB - TCA_MAX = TCA_STAB + TCA_PAD + TCA_DUMP_INVISIBLE + TCA_CHAIN + TCA_HW_OFFLOAD + TCA_INGRESS_BLOCK + TCA_EGRESS_BLOCK + TCA_DUMP_FLAGS + TCA_MAX = TCA_DUMP_FLAGS ) const ( @@ -56,6 +68,12 @@ const ( TCA_ACT_OPTIONS TCA_ACT_INDEX TCA_ACT_STATS + TCA_ACT_PAD + TCA_ACT_COOKIE + TCA_ACT_FLAGS + TCA_ACT_HW_STATS + TCA_ACT_USED_HW_STATS + TCA_ACT_IN_HW_COUNT TCA_ACT_MAX ) @@ -71,7 +89,11 @@ const ( TCA_STATS_RATE_EST TCA_STATS_QUEUE TCA_STATS_APP - TCA_STATS_MAX = TCA_STATS_APP + TCA_STATS_RATE_EST64 + TCA_STATS_PAD + TCA_STATS_BASIC_HW + TCA_STATS_PKT64 + TCA_STATS_MAX = TCA_STATS_PKT64 ) const ( @@ -83,12 +105,13 @@ const ( SizeofTcNetemCorr = 0x0c SizeofTcNetemReorder = 0x08 SizeofTcNetemCorrupt = 0x08 + SizeOfTcNetemRate = 
0x10 SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c SizeofTcHtbCopt = 2*SizeofTcRateSpec + 0x14 SizeofTcHtbGlob = 0x14 SizeofTcU32Key = 0x10 SizeofTcU32Sel = 0x10 // without keys - SizeofTcGen = 0x14 + SizeofTcGen = 0x16 SizeofTcConnmark = SizeofTcGen + 0x04 SizeofTcCsum = SizeofTcGen + 0x04 SizeofTcMirred = SizeofTcGen + 0x08 @@ -98,6 +121,7 @@ const ( SizeofTcSfqQopt = 0x0b SizeofTcSfqRedStats = 0x18 SizeofTcSfqQoptV1 = SizeofTcSfqQopt + SizeofTcSfqRedStats + 0x1c + SizeofUint32Bitfield = 0x8 ) // struct tcmsg { @@ -131,6 +155,18 @@ func (x *TcMsg) Serialize() []byte { return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:] } +type Tcf struct { + Install uint64 + LastUse uint64 + Expires uint64 + FirstUse uint64 +} + +func DeserializeTcf(b []byte) *Tcf { + const size = int(unsafe.Sizeof(Tcf{})) + return (*Tcf)(unsafe.Pointer(&b[0:size][0])) +} + // struct tcamsg { // unsigned char tca_family; // unsigned char tca__pad1; @@ -337,6 +373,26 @@ func (x *TcNetemCorrupt) Serialize() []byte { return (*(*[SizeofTcNetemCorrupt]byte)(unsafe.Pointer(x)))[:] } +// TcNetemRate is a struct that represents the rate of a netem qdisc +type TcNetemRate struct { + Rate uint32 + PacketOverhead int32 + CellSize uint32 + CellOverhead int32 +} + +func (msg *TcNetemRate) Len() int { + return SizeofTcRateSpec +} + +func DeserializeTcNetemRate(b []byte) *TcNetemRate { + return (*TcNetemRate)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0])) +} + +func (msg *TcNetemRate) Serialize() []byte { + return (*(*[SizeOfTcNetemRate]byte)(unsafe.Pointer(msg)))[:] +} + // struct tc_tbf_qopt { // struct tc_ratespec rate; // struct tc_ratespec peakrate; @@ -804,7 +860,8 @@ const ( TCA_SKBEDIT_MARK TCA_SKBEDIT_PAD TCA_SKBEDIT_PTYPE - TCA_SKBEDIT_MAX = TCA_SKBEDIT_MARK + TCA_SKBEDIT_MASK + TCA_SKBEDIT_MAX ) type TcSkbEdit struct { @@ -891,6 +948,10 @@ const ( TCA_FQ_FLOW_REFILL_DELAY // flow credit refill delay in usec TCA_FQ_ORPHAN_MASK // mask applied to orphaned skb hashes TCA_FQ_LOW_RATE_THRESHOLD // per packet delay under this rate + TCA_FQ_CE_THRESHOLD // DCTCP-like CE-marking threshold + TCA_FQ_TIMER_SLACK // timer slack + TCA_FQ_HORIZON // time horizon in us + TCA_FQ_HORIZON_DROP // drop packets beyond horizon, or cap their EDT ) const ( @@ -1018,6 +1079,9 @@ const ( __TCA_FLOWER_MAX ) +const TCA_CLS_FLAGS_SKIP_HW = 1 << 0 /* don't offload filter to HW */ +const TCA_CLS_FLAGS_SKIP_SW = 1 << 1 /* don't use filter in SW */ + // struct tc_sfq_qopt { // unsigned quantum; /* Bytes per round allocated to flow */ // int perturb_period; /* Period of hash perturbation */ @@ -1046,14 +1110,14 @@ func (x *TcSfqQopt) Serialize() []byte { return (*(*[SizeofTcSfqQopt]byte)(unsafe.Pointer(x)))[:] } -// struct tc_sfqred_stats { -// __u32 prob_drop; /* Early drops, below max threshold */ -// __u32 forced_drop; /* Early drops, after max threshold */ -// __u32 prob_mark; /* Marked packets, below max threshold */ -// __u32 forced_mark; /* Marked packets, after max threshold */ -// __u32 prob_mark_head; /* Marked packets, below max threshold */ -// __u32 forced_mark_head;/* Marked packets, after max threshold */ -// }; +// struct tc_sfqred_stats { +// __u32 prob_drop; /* Early drops, below max threshold */ +// __u32 forced_drop; /* Early drops, after max threshold */ +// __u32 prob_mark; /* Marked packets, below max threshold */ +// __u32 forced_mark; /* Marked packets, after max threshold */ +// __u32 prob_mark_head; /* Marked packets, below max threshold */ +// __u32 forced_mark_head;/* Marked packets, after max threshold */ +// }; type TcSfqRedStats 
struct { ProbDrop uint32 ForcedDrop uint32 @@ -1075,22 +1139,26 @@ func (x *TcSfqRedStats) Serialize() []byte { return (*(*[SizeofTcSfqRedStats]byte)(unsafe.Pointer(x)))[:] } -// struct tc_sfq_qopt_v1 { -// struct tc_sfq_qopt v0; -// unsigned int depth; /* max number of packets per flow */ -// unsigned int headdrop; +// struct tc_sfq_qopt_v1 { +// struct tc_sfq_qopt v0; +// unsigned int depth; /* max number of packets per flow */ +// unsigned int headdrop; +// // /* SFQRED parameters */ -// __u32 limit; /* HARD maximal flow queue length (bytes) */ -// __u32 qth_min; /* Min average length threshold (bytes) */ -// __u32 qth_max; /* Max average length threshold (bytes) */ -// unsigned char Wlog; /* log(W) */ -// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ -// unsigned char Scell_log; /* cell size for idle damping */ -// unsigned char flags; -// __u32 max_P; /* probability, high resolution */ +// +// __u32 limit; /* HARD maximal flow queue length (bytes) */ +// __u32 qth_min; /* Min average length threshold (bytes) */ +// __u32 qth_max; /* Max average length threshold (bytes) */ +// unsigned char Wlog; /* log(W) */ +// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ +// unsigned char Scell_log; /* cell size for idle damping */ +// unsigned char flags; +// __u32 max_P; /* probability, high resolution */ +// // /* SFQRED stats */ -// struct tc_sfqred_stats stats; -// }; +// +// struct tc_sfqred_stats stats; +// }; type TcSfqQoptV1 struct { TcSfqQopt Depth uint32 @@ -1117,3 +1185,427 @@ func DeserializeTcSfqQoptV1(b []byte) *TcSfqQoptV1 { func (x *TcSfqQoptV1) Serialize() []byte { return (*(*[SizeofTcSfqQoptV1]byte)(unsafe.Pointer(x)))[:] } + +// IPProto represents the Flower ip_proto attribute +type IPProto uint8 + +const ( + IPPROTO_TCP IPProto = unix.IPPROTO_TCP + IPPROTO_UDP IPProto = unix.IPPROTO_UDP + IPPROTO_SCTP IPProto = unix.IPPROTO_SCTP + IPPROTO_ICMP IPProto = unix.IPPROTO_ICMP + IPPROTO_ICMPV6 IPProto = unix.IPPROTO_ICMPV6 +) + +func (i IPProto) Serialize() []byte { + arr := make([]byte, 1) + arr[0] = byte(i) + return arr +} + +func (i IPProto) String() string { + switch i { + case IPPROTO_TCP: + return "tcp" + case IPPROTO_UDP: + return "udp" + case IPPROTO_SCTP: + return "sctp" + case IPPROTO_ICMP: + return "icmp" + case IPPROTO_ICMPV6: + return "icmpv6" + } + return fmt.Sprintf("%d", i) +} + +const ( + MaxOffs = 128 + SizeOfPeditSel = 24 + SizeOfPeditKey = 24 + + TCA_PEDIT_KEY_EX_HTYPE = 1 + TCA_PEDIT_KEY_EX_CMD = 2 +) + +const ( + TCA_PEDIT_UNSPEC = iota + TCA_PEDIT_TM + TCA_PEDIT_PARMS + TCA_PEDIT_PAD + TCA_PEDIT_PARMS_EX + TCA_PEDIT_KEYS_EX + TCA_PEDIT_KEY_EX +) + +// /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is a special case for legacy users.
It +// * means no specific header type - offset is relative to the network layer +// */ +type PeditHeaderType uint16 + +const ( + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = iota + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + __PEDIT_HDR_TYPE_MAX +) + +type PeditCmd uint16 + +const ( + TCA_PEDIT_KEY_EX_CMD_SET = 0 + TCA_PEDIT_KEY_EX_CMD_ADD = 1 +) + +type TcPeditSel struct { + TcGen + NKeys uint8 + Flags uint8 +} + +func DeserializeTcPeditKey(b []byte) *TcPeditKey { + return (*TcPeditKey)(unsafe.Pointer(&b[0:SizeOfPeditKey][0])) +} + +func DeserializeTcPedit(b []byte) (*TcPeditSel, []TcPeditKey) { + x := &TcPeditSel{} + copy((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(x)))[:SizeOfPeditSel], b) + + var keys []TcPeditKey + + next := SizeOfPeditKey + var i uint8 + for i = 0; i < x.NKeys; i++ { + keys = append(keys, *DeserializeTcPeditKey(b[next:])) + next += SizeOfPeditKey + } + + return x, keys +} + +type TcPeditKey struct { + Mask uint32 + Val uint32 + Off uint32 + At uint32 + OffMask uint32 + Shift uint32 +} + +type TcPeditKeyEx struct { + HeaderType PeditHeaderType + Cmd PeditCmd +} + +type TcPedit struct { + Sel TcPeditSel + Keys []TcPeditKey + KeysEx []TcPeditKeyEx + Extend uint8 +} + +func (p *TcPedit) Encode(parent *RtAttr) { + parent.AddRtAttr(TCA_ACT_KIND, ZeroTerminated("pedit")) + actOpts := parent.AddRtAttr(TCA_ACT_OPTIONS, nil) + + bbuf := bytes.NewBuffer(make([]byte, 0, int(unsafe.Sizeof(p.Sel)+unsafe.Sizeof(p.Keys)))) + + bbuf.Write((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(&p.Sel)))[:]) + + for i := uint8(0); i < p.Sel.NKeys; i++ { + bbuf.Write((*(*[SizeOfPeditKey]byte)(unsafe.Pointer(&p.Keys[i])))[:]) + } + actOpts.AddRtAttr(TCA_PEDIT_PARMS_EX, bbuf.Bytes()) + + exAttrs := actOpts.AddRtAttr(int(TCA_PEDIT_KEYS_EX|NLA_F_NESTED), nil) + for i := uint8(0); i < p.Sel.NKeys; i++ { + keyAttr := exAttrs.AddRtAttr(int(TCA_PEDIT_KEY_EX|NLA_F_NESTED), nil) + + htypeBuf := make([]byte, 2) + cmdBuf := make([]byte, 2) + + NativeEndian().PutUint16(htypeBuf, uint16(p.KeysEx[i].HeaderType)) + NativeEndian().PutUint16(cmdBuf, uint16(p.KeysEx[i].Cmd)) + + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_HTYPE, htypeBuf) + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_CMD, cmdBuf) + } +} + +func (p *TcPedit) SetEthDst(mac net.HardwareAddr) { + u32 := NativeEndian().Uint32(mac) + u16 := NativeEndian().Uint16(mac[4:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = uint32(u16) + tKey.Mask = 0xffff0000 + tKey.Off = 4 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetEthSrc(mac net.HardwareAddr) { + u16 := NativeEndian().Uint16(mac) + u32 := NativeEndian().Uint32(mac[2:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = uint32(u16) << 16 + tKey.Mask = 0x0000ffff + tKey.Off = 4 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Mask = 0 + tKey.Off = 8 + + tKeyEx.HeaderType = 
TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv6Src(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 8 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 20 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetDstIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Dst(ip) + } else { + p.SetIPv6Dst(ip) + } +} + +func (p *TcPedit) SetSrcIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Src(ip) + } else { + p.SetIPv6Src(ip) + } +} + +func (p *TcPedit) SetIPv6Dst(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 24 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 28 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 32 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 36 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Src(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Dst(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + 
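+	// A sketch of how these helpers compose (actAttr is an assumed
+	// parent *RtAttr for the action; To4() matters because the IPv4
+	// setters read only the first four bytes of the address):
+	//
+	//	p := &TcPedit{}
+	//	p.SetIPv4Dst(net.ParseIP("192.0.2.10").To4())
+	//	p.SetDstPort(8080, unix.IPPROTO_UDP)
+	//	p.Encode(actAttr)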
tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetDstPort sets the destination port. Only TCP and UDP are supported. +func (p *TcPedit) SetDstPort(dstPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(dstPort)) << 16 + tKey.Mask = 0x0000ffff + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetSrcPort sets the source port. Only TCP and UDP are supported. +func (p *TcPedit) SetSrcPort(srcPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(srcPort)) + tKey.Mask = 0xffff0000 + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} diff --git a/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go new file mode 100644 index 000000000..f209125df --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go @@ -0,0 +1,41 @@ +package nl + +const ( + VDPA_GENL_NAME = "vdpa" + VDPA_GENL_VERSION = 0x1 +) + +const ( + VDPA_CMD_UNSPEC = iota + VDPA_CMD_MGMTDEV_NEW + VDPA_CMD_MGMTDEV_GET /* can dump */ + VDPA_CMD_DEV_NEW + VDPA_CMD_DEV_DEL + VDPA_CMD_DEV_GET /* can dump */ + VDPA_CMD_DEV_CONFIG_GET /* can dump */ + VDPA_CMD_DEV_VSTATS_GET +) + +const ( + VDPA_ATTR_UNSPEC = iota + VDPA_ATTR_MGMTDEV_BUS_NAME + VDPA_ATTR_MGMTDEV_DEV_NAME + VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES + VDPA_ATTR_DEV_NAME + VDPA_ATTR_DEV_ID + VDPA_ATTR_DEV_VENDOR_ID + VDPA_ATTR_DEV_MAX_VQS + VDPA_ATTR_DEV_MAX_VQ_SIZE + VDPA_ATTR_DEV_MIN_VQ_SIZE + VDPA_ATTR_DEV_NET_CFG_MACADDR + VDPA_ATTR_DEV_NET_STATUS + VDPA_ATTR_DEV_NET_CFG_MAX_VQP + VDPA_ATTR_DEV_NET_CFG_MTU + VDPA_ATTR_DEV_NEGOTIATED_FEATURES + VDPA_ATTR_DEV_MGMTDEV_MAX_VQS + VDPA_ATTR_DEV_SUPPORTED_FEATURES + VDPA_ATTR_DEV_QUEUE_INDEX + VDPA_ATTR_DEV_VENDOR_ATTR_NAME + VDPA_ATTR_DEV_VENDOR_ATTR_VALUE + VDPA_ATTR_DEV_FEATURES +) diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go index dce9073f7..cdb318ba5 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -131,7 +131,15 @@ func (x *XfrmAddress) ToIP() net.IP { return ip } -func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet { +// family is only used when x and prefixlen are both 0 +func (x *XfrmAddress) ToIPNet(prefixlen uint8, family uint16) *net.IPNet { + empty := [SizeofXfrmAddress]byte{} + if bytes.Equal(x[:], empty[:]) && prefixlen == 0 { + if family == FAMILY_V6 { + return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(int(prefixlen), 128)} + } + return &net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(int(prefixlen), 32)} + } ip := x.ToIP() if GetIPFamily(ip) == FAMILY_V4 { return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go index 43a947f22..e8920b9a6 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go @@ -15,6 +15,7 @@ const ( SizeofXfrmEncapTmpl = 0x18 SizeofXfrmUsersaFlush = 0x1 SizeofXfrmReplayStateEsn = 0x18 + SizeofXfrmReplayState = 0x0c ) const ( @@ -28,6 +29,11 @@ const ( XFRM_STATE_ESN = 128 ) +const ( + XFRM_SA_XFLAG_DONT_ENCAP_DSCP = 1 + XFRM_SA_XFLAG_OSEQ_MAY_WRAP = 2 +) + // struct xfrm_usersa_id { // xfrm_address_t daddr; // __be32 spi; @@ -103,6 +109,7 @@ func (msg *XfrmStats) Serialize() []byte { // }; // // #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +// #define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2 // type XfrmUsersaInfo struct { @@ -332,3 +339,23 @@ func (msg *XfrmReplayStateEsn) Serialize() []byte { // We deliberately do not pass Bmp, as it gets set by the kernel. return (*(*[SizeofXfrmReplayStateEsn]byte)(unsafe.Pointer(msg)))[:] } + +// struct xfrm_replay_state { +// __u32 oseq; +// __u32 seq; +// __u32 bitmap; +// }; + +type XfrmReplayState struct { + OSeq uint32 + Seq uint32 + BitMap uint32 +} + +func DeserializeXfrmReplayState(b []byte) *XfrmReplayState { + return (*XfrmReplayState)(unsafe.Pointer(&b[0:SizeofXfrmReplayState][0])) +} + +func (msg *XfrmReplayState) Serialize() []byte { + return (*(*[SizeofXfrmReplayState]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/proc_event_linux.go b/vendor/github.com/vishvananda/netlink/proc_event_linux.go index 53bc59a6e..ac8762bd8 100644 --- a/vendor/github.com/vishvananda/netlink/proc_event_linux.go +++ b/vendor/github.com/vishvananda/netlink/proc_event_linux.go @@ -63,15 +63,6 @@ type ExitProcEvent struct { ParentTgid uint32 } -type ExitProcEvent2 struct { - ProcessPid uint32 - ProcessTgid uint32 - ExitCode uint32 - ExitSignal uint32 - ParentPid uint32 - ParentTgid uint32 -} - func (e *ExitProcEvent) Pid() uint32 { return e.ProcessPid } diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go index 60b23b374..0163cba3a 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo.go +++ b/vendor/github.com/vishvananda/netlink/protinfo.go @@ -6,14 +6,16 @@ import ( // Protinfo represents bridge flags from netlink. 
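+// Isolated and NeighSuppress are newly exposed below. A read sketch,
+// assuming link is a bridge port:
+//
+//	pi, err := LinkGetProtinfo(link)
+//	if err == nil && pi.Isolated {
+//		// the port forwards only to non-isolated ports
+//	}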
type Protinfo struct { - Hairpin bool - Guard bool - FastLeave bool - RootBlock bool - Learning bool - Flood bool - ProxyArp bool - ProxyArpWiFi bool + Hairpin bool + Guard bool + FastLeave bool + RootBlock bool + Learning bool + Flood bool + ProxyArp bool + ProxyArpWiFi bool + Isolated bool + NeighSuppress bool } // String returns a list of enabled flags @@ -47,6 +49,12 @@ func (prot *Protinfo) String() string { if prot.ProxyArpWiFi { boolStrings = append(boolStrings, "ProxyArpWiFi") } + if prot.Isolated { + boolStrings = append(boolStrings, "Isolated") + } + if prot.NeighSuppress { + boolStrings = append(boolStrings, "NeighSuppress") + } return strings.Join(boolStrings, " ") } diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 15b65123c..1ba25d3cd 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -68,6 +68,10 @@ func parseProtinfo(infos []syscall.NetlinkRouteAttr) (pi Protinfo) { pi.ProxyArp = byteToBool(info.Value[0]) case nl.IFLA_BRPORT_PROXYARP_WIFI: pi.ProxyArpWiFi = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_ISOLATED: + pi.Isolated = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_NEIGH_SUPPRESS: + pi.NeighSuppress = byteToBool(info.Value[0]) } } return diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go index f594c9c21..067743d39 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/vendor/github.com/vishvananda/netlink/qdisc.go @@ -17,19 +17,29 @@ const ( HANDLE_MIN_EGRESS = 0xFFFFFFF3 ) +const ( + HORIZON_DROP_POLICY_CAP = 0 + HORIZON_DROP_POLICY_DROP = 1 + HORIZON_DROP_POLICY_DEFAULT = 255 +) + type Qdisc interface { Attrs() *QdiscAttrs Type() string } +type QdiscStatistics ClassStatistics + // QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link, // has a handle, a parent and a refcnt. The root qdisc of a device should // have parent == HANDLE_ROOT. 
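+// A sketch reading the new Statistics field, assuming link carries
+// qdiscs:
+//
+//	qdiscs, err := QdiscList(link)
+//	if err == nil {
+//		for _, q := range qdiscs {
+//			if s := q.Attrs().Statistics; s != nil && s.Basic != nil {
+//				fmt.Printf("%s: %d bytes, %d packets\n", q.Type(), s.Basic.Bytes, s.Basic.Packets)
+//			}
+//		}
+//	}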
type QdiscAttrs struct { - LinkIndex int - Handle uint32 - Parent uint32 - Refcnt uint32 // read only + LinkIndex int + Handle uint32 + Parent uint32 + Refcnt uint32 // read only + IngressBlock *uint32 + Statistics *QdiscStatistics } func (q QdiscAttrs) String() string { @@ -113,6 +123,7 @@ type Htb struct { Defcls uint32 Debug uint32 DirectPkts uint32 + DirectQlen *uint32 } func NewHtb(attrs QdiscAttrs) *Htb { @@ -123,6 +134,7 @@ func NewHtb(attrs QdiscAttrs) *Htb { Rate2Quantum: 10, Debug: 0, DirectPkts: 0, + DirectQlen: nil, } } @@ -150,6 +162,7 @@ type NetemQdiscAttrs struct { ReorderCorr float32 // in % CorruptProb float32 // in % CorruptCorr float32 // in % + Rate64 uint64 } func (q NetemQdiscAttrs) String() string { @@ -174,6 +187,7 @@ type Netem struct { ReorderCorr uint32 CorruptProb uint32 CorruptCorr uint32 + Rate64 uint64 } func (netem *Netem) String() string { @@ -210,6 +224,19 @@ func (qdisc *Tbf) Type() string { return "tbf" } +// Clsact is a qdisc for adding filters +type Clsact struct { + QdiscAttrs +} + +func (qdisc *Clsact) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Clsact) Type() string { + return "clsact" +} + // Ingress is a qdisc for adding ingress filters type Ingress struct { QdiscAttrs @@ -278,22 +305,25 @@ type Fq struct { FlowDefaultRate uint32 FlowMaxRate uint32 // called BucketsLog under the hood - Buckets uint32 - FlowRefillDelay uint32 - LowRateThreshold uint32 + Buckets uint32 + FlowRefillDelay uint32 + LowRateThreshold uint32 + Horizon uint32 + HorizonDropPolicy uint8 } func (fq *Fq) String() string { return fmt.Sprintf( - "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v}", - fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, + "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v, Horizon: %v, HorizonDropPolicy: %v}", + fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, fq.Horizon, fq.HorizonDropPolicy, ) } func NewFq(attrs QdiscAttrs) *Fq { return &Fq{ - QdiscAttrs: attrs, - Pacing: 1, + QdiscAttrs: attrs, + Pacing: 1, + HorizonDropPolicy: HORIZON_DROP_POLICY_DEFAULT, } } diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index e182e1cfe..e732ae3bd 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "strconv" "strings" + "sync" "syscall" "github.com/vishvananda/netlink/nl" @@ -17,6 +18,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { var lossCorr, delayCorr, duplicateCorr uint32 var reorderProb, reorderCorr uint32 var corruptProb, corruptCorr uint32 + var rate64 uint64 latency := nattrs.Latency loss := Percentage2u32(nattrs.Loss) @@ -57,6 +59,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { corruptProb = Percentage2u32(nattrs.CorruptProb) corruptCorr = Percentage2u32(nattrs.CorruptCorr) + rate64 = nattrs.Rate64 return &Netem{ QdiscAttrs: attrs, @@ -73,6 +76,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { 
ReorderCorr: reorderCorr, CorruptProb: corruptProb, CorruptCorr: corruptCorr, + Rate64: rate64, } } @@ -159,6 +163,9 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) + if qdisc.Attrs().IngressBlock != nil { + req.AddData(nl.NewRtAttr(nl.TCA_INGRESS_BLOCK, nl.Uint32Attr(*qdisc.Attrs().IngressBlock))) + } options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) @@ -194,7 +201,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { opt.Debug = qdisc.Debug opt.DirectPkts = qdisc.DirectPkts options.AddRtAttr(nl.TCA_HTB_INIT, opt.Serialize()) - // options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) + if qdisc.DirectQlen != nil { + options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, nl.Uint32Attr(*qdisc.DirectQlen)) + } case *Hfsc: opt := nl.TcHfscOpt{} opt.Defcls = qdisc.Defcls @@ -231,6 +240,19 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if reorder.Probability > 0 { options.AddRtAttr(nl.TCA_NETEM_REORDER, reorder.Serialize()) } + // Rate + if qdisc.Rate64 > 0 { + rate := nl.TcNetemRate{} + if qdisc.Rate64 >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_NETEM_RATE64, nl.Uint64Attr(qdisc.Rate64)) + rate.Rate = ^uint32(0) + } else { + rate.Rate = uint32(qdisc.Rate64) + } + options.AddRtAttr(nl.TCA_NETEM_RATE, rate.Serialize()) + } + case *Clsact: + options = nil case *Ingress: // ingress filters must use the proper handle if qdisc.Attrs().Parent != HANDLE_INGRESS { @@ -265,6 +287,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.Buckets > 0 { options.AddRtAttr(nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets)))) } + if qdisc.PacketLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_PLIMIT, nl.Uint32Attr((uint32(qdisc.PacketLimit)))) + } if qdisc.LowRateThreshold > 0 { options.AddRtAttr(nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold)))) } @@ -286,6 +311,12 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.FlowDefaultRate > 0 { options.AddRtAttr(nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate)))) } + if qdisc.Horizon > 0 { + options.AddRtAttr(nl.TCA_FQ_HORIZON, nl.Uint32Attr(qdisc.Horizon)) + } + if qdisc.HorizonDropPolicy != HORIZON_DROP_POLICY_DEFAULT { + options.AddRtAttr(nl.TCA_FQ_HORIZON_DROP, nl.Uint8Attr(qdisc.HorizonDropPolicy)) + } case *Sfq: opt := nl.TcSfqQoptV1{} opt.TcSfqQopt.Quantum = qdisc.Quantum @@ -380,6 +411,8 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { qdisc = &Netem{} case "sfq": qdisc = &Sfq{} + case "clsact": + qdisc = &Clsact{} default: qdisc = &GenericQdisc{QdiscType: qdiscType} } @@ -442,6 +475,22 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { // no options for ingress } + case nl.TCA_INGRESS_BLOCK: + ingressBlock := new(uint32) + *ingressBlock = native.Uint32(attr.Value) + base.IngressBlock = ingressBlock + case nl.TCA_STATS: + s, err := parseTcStats(attr.Value) + if err != nil { + return nil, err + } + base.Statistics = (*QdiscStatistics)(s) + case nl.TCA_STATS2: + s, err := parseTcStats2(attr.Value) + if err != nil { + return nil, err + } + base.Statistics = (*QdiscStatistics)(s) } } *qdisc.Attrs() = base @@ -479,8 +528,8 @@ func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { htb.Debug = opt.Debug htb.DirectPkts = opt.DirectPkts case nl.TCA_HTB_DIRECT_QLEN: - // TODO - //htb.DirectQlen = native.uint32(datum.Value) + directQlen := 
native.Uint32(datum.Value) + htb.DirectQlen = &directQlen } } return nil @@ -546,6 +595,11 @@ func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { fq.FlowMaxRate = native.Uint32(datum.Value) case nl.TCA_FQ_FLOW_DEFAULT_RATE: fq.FlowDefaultRate = native.Uint32(datum.Value) + case nl.TCA_FQ_HORIZON: + fq.Horizon = native.Uint32(datum.Value) + case nl.TCA_FQ_HORIZON_DROP: + fq.HorizonDropPolicy = datum.Value[0] + } } return nil @@ -564,6 +618,8 @@ func parseNetemData(qdisc Qdisc, value []byte) error { if err != nil { return err } + var rate *nl.TcNetemRate + var rate64 uint64 for _, datum := range data { switch datum.Attr.Type { case nl.TCA_NETEM_CORR: @@ -579,8 +635,19 @@ func parseNetemData(qdisc Qdisc, value []byte) error { opt := nl.DeserializeTcNetemReorder(datum.Value) netem.ReorderProb = opt.Probability netem.ReorderCorr = opt.Correlation + case nl.TCA_NETEM_RATE: + rate = nl.DeserializeTcNetemRate(datum.Value) + case nl.TCA_NETEM_RATE64: + rate64 = native.Uint64(datum.Value) } } + if rate != nil { + netem.Rate64 = uint64(rate.Rate) + if rate64 > 0 { + netem.Rate64 = rate64 + } + } + return nil } @@ -624,6 +691,9 @@ var ( tickInUsec float64 clockFactor float64 hz float64 + + // Without this, the go race detector may report races. + initClockMutex sync.Mutex ) func initClock() { @@ -658,6 +728,8 @@ func initClock() { } func TickInUsec() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() if tickInUsec == 0.0 { initClock() } @@ -665,6 +737,8 @@ func TickInUsec() float64 { } func ClockFactor() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() if clockFactor == 0.0 { initClock() } @@ -672,6 +746,8 @@ func ClockFactor() float64 { } func Hz() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() if hz == 0.0 { initClock() } diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go index 79cc218ec..1b4555d5c 100644 --- a/vendor/github.com/vishvananda/netlink/route.go +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -154,8 +154,15 @@ type flagString struct { } // RouteUpdate is sent when a route changes - type is RTM_NEWROUTE or RTM_DELROUTE + +// NlFlags is only non-zero for RTM_NEWROUTE, the following flags can be set: +// - unix.NLM_F_REPLACE - Replace existing matching config object with this request +// - unix.NLM_F_EXCL - Don't replace the config object if it already exists +// - unix.NLM_F_CREATE - Create config object if it doesn't already exist +// - unix.NLM_F_APPEND - Add to the end of the object list type RouteUpdate struct { - Type uint16 + Type uint16 + NlFlags uint16 Route } diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index 8da886657..0cd4f8363 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -41,7 +41,6 @@ func (s Scope) String() string { } } - const ( FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE @@ -274,6 +273,16 @@ type SEG6LocalEncap struct { In6Addr net.IP Iif int Oif int + bpf bpfObj +} + +func (e *SEG6LocalEncap) SetProg(progFd int, progName string) error { + if progFd <= 0 { + return fmt.Errorf("seg6local bpf SetProg: invalid fd") + } + e.bpf.progFd = progFd + e.bpf.progName = progName + return nil } func (e *SEG6LocalEncap) Type() int { @@ -307,6 +316,22 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error { case nl.SEG6_LOCAL_OIF: e.Oif = 
int(native.Uint32(attr.Value[0:4])) e.Flags[nl.SEG6_LOCAL_OIF] = true + case nl.SEG6_LOCAL_BPF: + var bpfAttrs []syscall.NetlinkRouteAttr + bpfAttrs, err = nl.ParseRouteAttr(attr.Value) + bpfobj := bpfObj{} + for _, bpfAttr := range bpfAttrs { + switch bpfAttr.Attr.Type { + case nl.LWT_BPF_PROG_FD: + bpfobj.progFd = int(native.Uint32(bpfAttr.Value)) + case nl.LWT_BPF_PROG_NAME: + bpfobj.progName = string(bpfAttr.Value) + default: + err = fmt.Errorf("seg6local bpf decode: unknown attribute: Type %d", bpfAttr.Attr) + } + } + e.bpf = bpfobj + e.Flags[nl.SEG6_LOCAL_BPF] = true } } return err @@ -368,6 +393,16 @@ func (e *SEG6LocalEncap) Encode() ([]byte, error) { native.PutUint32(attr[4:], uint32(e.Oif)) res = append(res, attr...) } + if e.Flags[nl.SEG6_LOCAL_BPF] { + attr := nl.NewRtAttr(nl.SEG6_LOCAL_BPF, []byte{}) + if e.bpf.progFd != 0 { + attr.AddRtAttr(nl.LWT_BPF_PROG_FD, nl.Uint32Attr(uint32(e.bpf.progFd))) + } + if e.bpf.progName != "" { + attr.AddRtAttr(nl.LWT_BPF_PROG_NAME, nl.ZeroTerminated(e.bpf.progName)) + } + res = append(res, attr.Serialize()...) + } return res, err } func (e *SEG6LocalEncap) String() string { @@ -401,12 +436,15 @@ func (e *SEG6LocalEncap) String() string { } if e.Flags[nl.SEG6_LOCAL_SRH] { segs := make([]string, 0, len(e.Segments)) - //append segment backwards (from n to 0) since seg#0 is the last segment. + // append segment backwards (from n to 0) since seg#0 is the last segment. for i := len(e.Segments); i > 0; i-- { segs = append(segs, e.Segments[i-1].String()) } strs = append(strs, fmt.Sprintf("segs %d [ %s ]", len(e.Segments), strings.Join(segs, " "))) } + if e.Flags[nl.SEG6_LOCAL_BPF] { + strs = append(strs, fmt.Sprintf("bpf %s[%d]", e.bpf.progName, e.bpf.progFd)) + } return strings.Join(strs, " ") } func (e *SEG6LocalEncap) Equal(x Encap) bool { @@ -438,7 +476,7 @@ func (e *SEG6LocalEncap) Equal(x Encap) bool { if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) { return false } - if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif { + if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf { return false } return true @@ -590,6 +628,109 @@ func (e *BpfEncap) Equal(x Encap) bool { return true } +// IP6tnlEncap definition +type IP6tnlEncap struct { + ID uint64 + Dst net.IP + Src net.IP + Hoplimit uint8 + TC uint8 + Flags uint16 +} + +func (e *IP6tnlEncap) Type() int { + return nl.LWTUNNEL_ENCAP_IP6 +} + +func (e *IP6tnlEncap) Decode(buf []byte) error { + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.LWTUNNEL_IP6_ID: + e.ID = uint64(native.Uint64(attr.Value[0:4])) + case nl.LWTUNNEL_IP6_DST: + e.Dst = net.IP(attr.Value[:]) + case nl.LWTUNNEL_IP6_SRC: + e.Src = net.IP(attr.Value[:]) + case nl.LWTUNNEL_IP6_HOPLIMIT: + e.Hoplimit = attr.Value[0] + case nl.LWTUNNEL_IP6_TC: + // e.TC = attr.Value[0] + err = fmt.Errorf("decoding TC in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_FLAGS: + // e.Flags = uint16(native.Uint16(attr.Value[0:2])) + err = fmt.Errorf("decoding FLAG in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_PAD: + err = fmt.Errorf("decoding PAD in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_OPTS: + err = fmt.Errorf("decoding OPTS in IP6tnlEncap is not supported") + } + } + return err +} + +func (e *IP6tnlEncap) Encode() ([]byte, error) { + + final := []byte{} + + resID := make([]byte, 12) + native.PutUint16(resID, 12) // 2+2+8 + 
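+	// Each option here is a hand-rolled netlink TLV: a 2-byte total
+	// length, a 2-byte type, then the value bytes.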
native.PutUint16(resID[2:], nl.LWTUNNEL_IP6_ID) + native.PutUint64(resID[4:], 0) + final = append(final, resID...) + + resDst := make([]byte, 4) + native.PutUint16(resDst, 20) // 2+2+16 + native.PutUint16(resDst[2:], nl.LWTUNNEL_IP6_DST) + resDst = append(resDst, e.Dst...) + final = append(final, resDst...) + + resSrc := make([]byte, 4) + native.PutUint16(resSrc, 20) + native.PutUint16(resSrc[2:], nl.LWTUNNEL_IP6_SRC) + resSrc = append(resSrc, e.Src...) + final = append(final, resSrc...) + + // resTc := make([]byte, 5) + // native.PutUint16(resTc, 5) + // native.PutUint16(resTc[2:], nl.LWTUNNEL_IP6_TC) + // resTc[4] = e.TC + // final = append(final,resTc...) + + resHops := make([]byte, 5) + native.PutUint16(resHops, 5) + native.PutUint16(resHops[2:], nl.LWTUNNEL_IP6_HOPLIMIT) + resHops[4] = e.Hoplimit + final = append(final, resHops...) + + // resFlags := make([]byte, 6) + // native.PutUint16(resFlags, 6) + // native.PutUint16(resFlags[2:], nl.LWTUNNEL_IP6_FLAGS) + // native.PutUint16(resFlags[4:], e.Flags) + // final = append(final,resFlags...) + + return final, nil +} + +func (e *IP6tnlEncap) String() string { + return fmt.Sprintf("id %d src %s dst %s hoplimit %d tc %d flags 0x%.4x", e.ID, e.Src, e.Dst, e.Hoplimit, e.TC, e.Flags) +} + +func (e *IP6tnlEncap) Equal(x Encap) bool { + o, ok := x.(*IP6tnlEncap) + if !ok { + return false + } + + if e.ID != o.ID || e.Flags != o.Flags || e.Hoplimit != o.Hoplimit || !e.Src.Equal(o.Src) || !e.Dst.Equal(o.Dst) || e.TC != o.TC { + return false + } + return true +} + type Via struct { AddrFamily int Addr net.IP @@ -656,7 +797,8 @@ func RouteAdd(route *Route) error { func (h *Handle) RouteAdd(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteAppend will append a route to the system. @@ -670,7 +812,8 @@ func RouteAppend(route *Route) error { func (h *Handle) RouteAppend(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_APPEND | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteAddEcmp will add a route to the system. @@ -682,7 +825,23 @@ func RouteAddEcmp(route *Route) error { func (h *Handle) RouteAddEcmp(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteChange will change an existing route in the system. +// Equivalent to: `ip route change $route` +func RouteChange(route *Route) error { + return pkgHandle.RouteChange(route) +} + +// RouteChange will change an existing route in the system. +// Equivalent to: `ip route change $route` +func (h *Handle) RouteChange(route *Route) error { + flags := unix.NLM_F_REPLACE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteReplace will add a route to the system.
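+// A sketch for the new RouteChange above (it assumes a matching route
+// already exists; with NLM_F_REPLACE and no NLM_F_CREATE the kernel
+// rejects unknown routes):
+//
+//	_, dst, _ := net.ParseCIDR("10.0.0.0/24")
+//	err := RouteChange(&Route{
+//		LinkIndex: link.Attrs().Index,
+//		Dst:       dst,
+//		Gw:        net.ParseIP("192.168.0.1"),
+//	})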
@@ -696,7 +855,8 @@ func RouteReplace(route *Route) error { func (h *Handle) RouteReplace(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteDel will delete a route from the system. @@ -709,12 +869,27 @@ func RouteDel(route *Route) error { // Equivalent to: `ip route del $route` func (h *Handle) RouteDel(route *Route) error { req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) - return h.routeHandle(route, req, nl.NewRtDelMsg()) + _, err := h.routeHandle(route, req, nl.NewRtDelMsg()) + return err } -func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { - if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { - return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil") +func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) ([][]byte, error) { + if err := h.prepareRouteReq(route, req, msg); err != nil { + return nil, err + } + return req.Execute(unix.NETLINK_ROUTE, 0) +} + +func (h *Handle) routeHandleIter(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg, f func(msg []byte) bool) error { + if err := h.prepareRouteReq(route, req, msg); err != nil { + return err + } + return req.ExecuteIter(unix.NETLINK_ROUTE, 0, f) +} + +func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { + if req.NlMsghdr.Type != unix.RTM_GETROUTE && (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { + return fmt.Errorf("either Dst.IP, Src.IP or Gw must be set") } family := -1 @@ -968,19 +1143,21 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg msg.Flags = uint32(route.Flags) msg.Scope = uint8(route.Scope) - msg.Family = uint8(family) + // only overwrite family if it was not set in msg + if msg.Family == 0 { + msg.Family = uint8(family) + } req.AddData(msg) for _, attr := range rtAttrs { req.AddData(attr) } - b := make([]byte, 4) - native.PutUint32(b, uint32(route.LinkIndex)) - - req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err + if (req.NlMsghdr.Type != unix.RTM_GETROUTE) || (req.NlMsghdr.Type == unix.RTM_GETROUTE && route.LinkIndex > 0) { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.LinkIndex)) + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + return nil } // RouteList gets a list of routes in the system. @@ -994,13 +1171,13 @@ func RouteList(link Link, family int) ([]Route, error) { // Equivalent to: `ip route show`. // The list can be filtered by link and ip family. func (h *Handle) RouteList(link Link, family int) ([]Route, error) { - var routeFilter *Route + routeFilter := &Route{} if link != nil { - routeFilter = &Route{ - LinkIndex: link.Attrs().Index, - } + routeFilter.LinkIndex = link.Attrs().Index + + return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) } - return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) + return h.RouteListFiltered(family, routeFilter, 0) } // RouteListFiltered gets a list of routes in the system filtered with specified rules. @@ -1012,68 +1189,94 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. 
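+// A filter sketch, selecting IPv4 routes from the main table:
+//
+//	routes, err := RouteListFiltered(FAMILY_V4,
+//		&Route{Table: unix.RT_TABLE_MAIN}, RT_FILTER_TABLE)
+//
+// For large tables, RouteListFilteredIter below streams each route to a
+// callback instead of collecting the whole slice.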
// All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { - req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) - rtmsg := nl.NewRtMsg() - rtmsg.Family = uint8(family) - req.AddData(rtmsg) - - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + var res []Route + err := h.RouteListFilteredIter(family, filter, filterMask, func(route Route) (cont bool) { + res = append(res, route) + return true + }) if err != nil { return nil, err } + return res, nil +} - var res []Route - for _, m := range msgs { +// RouteListFilteredIter passes each route that matches the filter to the given iterator func. Iteration continues +// until all routes are loaded or the func returns false. +func RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { + return pkgHandle.RouteListFilteredIter(family, filter, filterMask, f) +} + +func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) + rtmsg := &nl.RtMsg{} + rtmsg.Family = uint8(family) + + var parseErr error + err := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool { msg := nl.DeserializeRtMsg(m) + if family != FAMILY_ALL && msg.Family != uint8(family) { + // Ignore routes not matching requested family + return true + } if msg.Flags&unix.RTM_F_CLONED != 0 { // Ignore cloned routes - continue + return true } if msg.Table != unix.RT_TABLE_MAIN { - if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { + if filter == nil || filterMask&RT_FILTER_TABLE == 0 { // Ignore non-main tables - continue + return true } } route, err := deserializeRoute(m) if err != nil { - return nil, err + parseErr = err + return false } if filter != nil { switch { case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table: - continue + return true case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: - continue + return true case filterMask&RT_FILTER_SCOPE != 0 && route.Scope != filter.Scope: - continue + return true case filterMask&RT_FILTER_TYPE != 0 && route.Type != filter.Type: - continue + return true case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos: - continue + return true case filterMask&RT_FILTER_REALM != 0 && route.Realm != filter.Realm: - continue + return true case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex: - continue + return true case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex: - continue + return true case filterMask&RT_FILTER_GW != 0 && !route.Gw.Equal(filter.Gw): - continue + return true case filterMask&RT_FILTER_SRC != 0 && !route.Src.Equal(filter.Src): - continue + return true case filterMask&RT_FILTER_DST != 0: if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) { + if filter.Dst == nil { + filter.Dst = genZeroIPNet(family) + } if !ipNetEqual(route.Dst, filter.Dst) { - continue + return true } } case filterMask&RT_FILTER_HOPLIMIT != 0 && route.Hoplimit != filter.Hoplimit: - continue + return true } } - res = append(res, route) + return f(route) + }) + if err != nil { + return err } - return res, nil + if parseErr != nil { + return parseErr + } + return nil } // deserializeRoute decodes a binary netlink message into a Route struct @@ -1257,6 +1460,27 @@ func deserializeRoute(m []byte) 
(Route, error) { } } + // Same logic as the iproute2 implementation to generate the "default" dst + if route.Dst == nil { + var addLen int + var ip net.IP + switch msg.Family { + case FAMILY_V4: + addLen = net.IPv4len + ip = net.IPv4zero + case FAMILY_V6: + addLen = net.IPv6len + ip = net.IPv6zero + } + + if addLen != 0 { + route.Dst = &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(msg.Dst_len), 8*addLen), + } + } + } + if len(encap.Value) != 0 && len(encapType.Value) != 0 { typ := int(native.Uint16(encapType.Value[0:2])) var e Encap @@ -1291,10 +1515,14 @@ func deserializeRoute(m []byte) (Route, error) { // RouteGetOptions contains a set of options to use with // RouteGetWithOptions type RouteGetOptions struct { - Iif string - Oif string - VrfName string - SrcAddr net.IP + Iif string + IifIndex int + Oif string + VrfName string + SrcAddr net.IP + UID *uint32 + Mark uint32 + FIBMatch bool } // RouteGetWithOptions gets a route to a specific destination from the host system. @@ -1330,6 +1558,9 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption msg.Src_len = bitlen } msg.Flags = unix.RTM_F_LOOKUP_TABLE + if options != nil && options.FIBMatch { + msg.Flags |= unix.RTM_F_FIB_MATCH + } req.AddData(msg) rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) @@ -1337,7 +1568,7 @@ if options != nil { if options.VrfName != "" { - link, err := LinkByName(options.VrfName) + link, err := h.LinkByName(options.VrfName) if err != nil { return nil, err } @@ -1347,20 +1578,27 @@ req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) } + iifIndex := 0 if len(options.Iif) > 0 { - link, err := LinkByName(options.Iif) + link, err := h.LinkByName(options.Iif) if err != nil { return nil, err } + iifIndex = link.Attrs().Index + } else if options.IifIndex > 0 { + iifIndex = options.IifIndex + } + + if iifIndex > 0 { b := make([]byte, 4) - native.PutUint32(b, uint32(link.Attrs().Index)) + native.PutUint32(b, uint32(iifIndex)) req.AddData(nl.NewRtAttr(unix.RTA_IIF, b)) } if len(options.Oif) > 0 { - link, err := LinkByName(options.Oif) + link, err := h.LinkByName(options.Oif) if err != nil { return nil, err } @@ -1381,6 +1619,21 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption req.AddData(nl.NewRtAttr(unix.RTA_SRC, srcAddr)) } + + if options.UID != nil { + uid := *options.UID + b := make([]byte, 4) + native.PutUint32(b, uid) + + req.AddData(nl.NewRtAttr(unix.RTA_UID, b)) + } + + if options.Mark > 0 { + b := make([]byte, 4) + native.PutUint32(b, options.Mark) + + req.AddData(nl.NewRtAttr(unix.RTA_MARK, b)) + } } msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) @@ -1408,21 +1661,24 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { // RouteSubscribe takes a chan down which notifications will be sent // when routes are added or deleted. Close the 'done' chan to stop subscription. func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) + return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) } // RouteSubscribeAt works like RouteSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns).
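+//
+// For the option-based variant, a sketch (assumes ch is drained
+// elsewhere; RouteUpdate.NlFlags distinguishes replace/append on
+// RTM_NEWROUTE):
+//
+//	ch := make(chan RouteUpdate)
+//	done := make(chan struct{})
+//	err := RouteSubscribeWithOptions(ch, done, RouteSubscribeOptions{
+//		ListExisting:      true,
+//		ReceiveBufferSize: 1 << 20,
+//	})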
func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(ns, netns.None(), ch, done, nil, false) + return routeSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // RouteSubscribeOptions contains a set of options to use with // RouteSubscribeWithOptions. type RouteSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // RouteSubscribeWithOptions work like RouteSubscribe but enable to @@ -1433,14 +1689,27 @@ func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, opti none := netns.None() options.Namespace = &none } - return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE) if err != nil { return err } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } if done != nil { go func() { <-done @@ -1495,7 +1764,11 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < } continue } - ch <- RouteUpdate{Type: m.Header.Type, Route: route} + ch <- RouteUpdate{ + Type: m.Header.Type, + NlFlags: m.Header.Flags & (unix.NLM_F_REPLACE | unix.NLM_F_EXCL | unix.NLM_F_CREATE | unix.NLM_F_APPEND), + Route: route, + } } } }() @@ -1523,7 +1796,7 @@ func (p RouteProtocol) String() string { return "gated" case unix.RTPROT_ISIS: return "isis" - //case unix.RTPROT_KEEPALIVED: + // case unix.RTPROT_KEEPALIVED: // return "keepalived" case unix.RTPROT_KERNEL: return "kernel" @@ -1553,3 +1826,24 @@ func (p RouteProtocol) String() string { return strconv.Itoa(int(p)) } } + +// genZeroIPNet returns 0.0.0.0/0 or ::/0 for IPv4 or IPv6, otherwise nil +func genZeroIPNet(family int) *net.IPNet { + var addLen int + var ip net.IP + switch family { + case FAMILY_V4: + addLen = net.IPv4len + ip = net.IPv4zero + case FAMILY_V6: + addLen = net.IPv6len + ip = net.IPv6zero + } + if addLen != 0 { + return &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(0, 8*addLen), + } + } + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go index 53cd3d4f6..9d74c7cd8 100644 --- a/vendor/github.com/vishvananda/netlink/rule.go +++ b/vendor/github.com/vishvananda/netlink/rule.go @@ -10,8 +10,8 @@ type Rule struct { Priority int Family int Table int - Mark int - Mask int + Mark uint32 + Mask *uint32 Tos uint TunID uint Goto int @@ -26,6 +26,9 @@ type Rule struct { Dport *RulePortRange Sport *RulePortRange IPProto int + UIDRange *RuleUIDRange + Protocol uint8 + Type uint8 } func (r Rule) String() 
string { @@ -39,8 +42,8 @@ func (r Rule) String() string { to = r.Dst.String() } - return fmt.Sprintf("ip rule %d: from %s to %s table %d", - r.Priority, from, to, r.Table) + return fmt.Sprintf("ip rule %d: from %s to %s table %d %s", + r.Priority, from, to, r.Table, r.typeString()) } // NewRule return empty rules. @@ -49,8 +52,8 @@ func NewRule() *Rule { SuppressIfgroup: -1, SuppressPrefixlen: -1, Priority: -1, - Mark: -1, - Mask: -1, + Mark: 0, + Mask: nil, Goto: -1, Flow: -1, } @@ -66,3 +69,14 @@ type RulePortRange struct { Start uint16 End uint16 } + +// NewRuleUIDRange creates rule uid range. +func NewRuleUIDRange(start, end uint32) *RuleUIDRange { + return &RuleUIDRange{Start: start, End: end} +} + +// RuleUIDRange represents rule uid range. +type RuleUIDRange struct { + Start uint32 + End uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index 3ae213880..ddff99cfa 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -43,8 +43,8 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg.Protocol = unix.RTPROT_BOOT msg.Scope = unix.RT_SCOPE_UNIVERSE msg.Table = unix.RT_TABLE_UNSPEC - msg.Type = unix.RTN_UNSPEC - if req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { + msg.Type = rule.Type // usually 0, same as unix.RTN_UNSPEC + if msg.Type == 0 && req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { msg.Type = unix.RTN_UNICAST } if rule.Invert { @@ -102,14 +102,14 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { native.PutUint32(b, uint32(rule.Priority)) req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b)) } - if rule.Mark >= 0 { + if rule.Mark != 0 || rule.Mask != nil { b := make([]byte, 4) - native.PutUint32(b, uint32(rule.Mark)) + native.PutUint32(b, rule.Mark) req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b)) } - if rule.Mask >= 0 { + if rule.Mask != nil { b := make([]byte, 4) - native.PutUint32(b, uint32(rule.Mask)) + native.PutUint32(b, *rule.Mask) req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b)) } if rule.Flow >= 0 { @@ -168,6 +168,15 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_SPORT_RANGE, b)) } + if rule.UIDRange != nil { + b := rule.UIDRange.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_UID_RANGE, b)) + } + + if rule.Protocol > 0 { + req.AddData(nl.NewRtAttr(nl.FRA_PROTOCOL, nl.Uint8Attr(rule.Protocol))) + } + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -212,8 +221,10 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( } rule := NewRule() + rule.Priority = 0 // The default priority from kernel rule.Invert = msg.Flags&FibRuleInvert > 0 + rule.Family = int(msg.Family) rule.Tos = uint(msg.Tos) for j := range attrs { @@ -231,9 +242,10 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)), } case nl.FRA_FWMARK: - rule.Mark = int(native.Uint32(attrs[j].Value[0:4])) + rule.Mark = native.Uint32(attrs[j].Value[0:4]) case nl.FRA_FWMASK: - rule.Mask = int(native.Uint32(attrs[j].Value[0:4])) + mask := native.Uint32(attrs[j].Value[0:4]) + rule.Mask = &mask case nl.FRA_TUN_ID: rule.TunID = uint(native.Uint64(attrs[j].Value[0:8])) case nl.FRA_IIFNAME: @@ -262,6 +274,10 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) case 
nl.FRA_SPORT_RANGE: rule.Sport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + case nl.FRA_UID_RANGE: + rule.UIDRange = NewRuleUIDRange(native.Uint32(attrs[j].Value[0:4]), native.Uint32(attrs[j].Value[4:8])) + case nl.FRA_PROTOCOL: + rule.Protocol = uint8(attrs[j].Value[0]) } } @@ -282,7 +298,7 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( continue case filterMask&RT_FILTER_MARK != 0 && rule.Mark != filter.Mark: continue - case filterMask&RT_FILTER_MASK != 0 && rule.Mask != filter.Mask: + case filterMask&RT_FILTER_MASK != 0 && !ptrEqual(rule.Mask, filter.Mask): continue } } @@ -299,3 +315,51 @@ func (pr *RulePortRange) toRtAttrData() []byte { native.PutUint16(b[1], pr.End) return bytes.Join(b, []byte{}) } + +func (pr *RuleUIDRange) toRtAttrData() []byte { + b := [][]byte{make([]byte, 4), make([]byte, 4)} + native.PutUint32(b[0], pr.Start) + native.PutUint32(b[1], pr.End) + return bytes.Join(b, []byte{}) +} + +func ptrEqual(a, b *uint32) bool { + if a == b { + return true + } + if (a == nil) || (b == nil) { + return false + } + return *a == *b +} + +func (r Rule) typeString() string { + switch r.Type { + case unix.RTN_UNSPEC: // zero + return "" + case unix.RTN_UNICAST: + return "" + case unix.RTN_LOCAL: + return "local" + case unix.RTN_BROADCAST: + return "broadcast" + case unix.RTN_ANYCAST: + return "anycast" + case unix.RTN_MULTICAST: + return "multicast" + case unix.RTN_BLACKHOLE: + return "blackhole" + case unix.RTN_UNREACHABLE: + return "unreachable" + case unix.RTN_PROHIBIT: + return "prohibit" + case unix.RTN_THROW: + return "throw" + case unix.RTN_NAT: + return "nat" + case unix.RTN_XRESOLVE: + return "xresolve" + default: + return fmt.Sprintf("type(0x%x)", r.Type) + } +} diff --git a/vendor/github.com/vishvananda/netlink/rule_nonlinux.go b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go new file mode 100644 index 000000000..2b19aa64c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package netlink + +func (r Rule) typeString() string { + return "" +} diff --git a/vendor/github.com/vishvananda/netlink/socket.go b/vendor/github.com/vishvananda/netlink/socket.go index 41aa72624..e65efb130 100644 --- a/vendor/github.com/vishvananda/netlink/socket.go +++ b/vendor/github.com/vishvananda/netlink/socket.go @@ -25,3 +25,80 @@ type Socket struct { UID uint32 INode uint32 } + +// UnixSocket represents a netlink unix socket. +type UnixSocket struct { + Type uint8 + Family uint8 + State uint8 + pad uint8 + INode uint32 + Cookie [2]uint32 +} + +// XDPSocket represents an XDP socket (and the common diagnosis part in +// particular). Please note that in contrast to [UnixSocket] the XDPSocket type +// does not feature “State” information. 
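// ---------------------------------------------------------------------------
// Editor's aside, not part of the vendored diff: the Rule changes above are
// breaking for callers: Mark is now uint32 and Mask is *uint32 (nil meaning
// no FRA_FWMASK attribute is sent), replacing the old int fields where -1
// meant unset; rules also gained UIDRange and Protocol. A hedged sketch of
// the new shape:
package main

import "github.com/vishvananda/netlink"

func main() {
	r := netlink.NewRule() // Mark now defaults to 0, Mask to nil
	r.Family = netlink.FAMILY_V4
	r.Table = 100
	r.Mark = 0x200 // FRA_FWMARK is emitted when Mark != 0 or Mask != nil
	mask := uint32(0xff00)
	r.Mask = &mask
	// Restrict the rule to sockets owned by UIDs 1000-1999.
	r.UIDRange = netlink.NewRuleUIDRange(1000, 1999)
	if err := netlink.RuleAdd(r); err != nil {
		panic(err)
	}
}
// ---------------------------------------------------------------------------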
+type XDPSocket struct { + // xdp_diag_msg + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21 + Family uint8 + Type uint8 + pad uint16 + Ino uint32 + Cookie [2]uint32 +} + +type XDPInfo struct { + // XDP_DIAG_INFO/xdp_diag_info + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L51 + Ifindex uint32 + QueueID uint32 + + // XDP_DIAG_UID + UID uint32 + + // XDP_RX_RING + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L56 + RxRingEntries uint32 + TxRingEntries uint32 + UmemFillRingEntries uint32 + UmemCompletionRingEntries uint32 + + // XDR_DIAG_UMEM + Umem *XDPDiagUmem + + // XDR_DIAG_STATS + Stats *XDPDiagStats +} + +const ( + XDP_DU_F_ZEROCOPY = 1 << iota +) + +// XDPDiagUmem describes the umem attached to an XDP socket. +// +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L62 +type XDPDiagUmem struct { + Size uint64 + ID uint32 + NumPages uint32 + ChunkSize uint32 + Headroom uint32 + Ifindex uint32 + QueueID uint32 + Flags uint32 + Refs uint32 +} + +// XDPDiagStats contains ring statistics for an XDP socket. +// +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L74 +type XDPDiagStats struct { + RxDropped uint64 + RxInvalid uint64 + RxFull uint64 + FillRingEmpty uint64 + TxInvalid uint64 + TxRingEmpty uint64 +} diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index b881fe496..4eb4aeafb 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -11,9 +11,11 @@ import ( ) const ( - sizeofSocketID = 0x30 - sizeofSocketRequest = sizeofSocketID + 0x8 - sizeofSocket = sizeofSocketID + 0x18 + sizeofSocketID = 0x30 + sizeofSocketRequest = sizeofSocketID + 0x8 + sizeofSocket = sizeofSocketID + 0x18 + sizeofUnixSocketRequest = 0x18 // 24 byte + sizeofUnixSocket = 0x10 // 16 byte ) type socketRequest struct { @@ -54,10 +56,8 @@ func (r *socketRequest) Serialize() []byte { copy(b.Next(16), r.ID.Source) copy(b.Next(16), r.ID.Destination) } else { - copy(b.Next(4), r.ID.Source.To4()) - b.Next(12) - copy(b.Next(4), r.ID.Destination.To4()) - b.Next(12) + copy(b.Next(16), r.ID.Source.To4()) + copy(b.Next(16), r.ID.Destination.To4()) } native.PutUint32(b.Next(4), r.ID.Interface) native.PutUint32(b.Next(4), r.ID.Cookie[0]) @@ -67,6 +67,32 @@ func (r *socketRequest) Serialize() []byte { func (r *socketRequest) Len() int { return sizeofSocketRequest } +// According to linux/include/uapi/linux/unix_diag.h +type unixSocketRequest struct { + Family uint8 + Protocol uint8 + pad uint16 + States uint32 + INode uint32 + Show uint32 + Cookie [2]uint32 +} + +func (r *unixSocketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofUnixSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + native.PutUint16(b.Next(2), r.pad) + native.PutUint32(b.Next(4), r.States) + native.PutUint32(b.Next(4), r.INode) + native.PutUint32(b.Next(4), r.Show) + native.PutUint32(b.Next(4), r.Cookie[0]) + native.PutUint32(b.Next(4), r.Cookie[1]) + return b.Bytes +} + +func (r *unixSocketRequest) Len() int { return sizeofUnixSocketRequest } + type readBuffer struct { Bytes []byte pos int @@ -115,31 +141,126 @@ func (s *Socket) deserialize(b []byte) error { return nil } +func (u *UnixSocket) deserialize(b []byte) error { + if len(b) < sizeofUnixSocket { + return fmt.Errorf("unix diag data short read (%d); want %d", len(b), 
sizeofUnixSocket) + } + rb := readBuffer{Bytes: b} + u.Type = rb.Read() + u.Family = rb.Read() + u.State = rb.Read() + u.pad = rb.Read() + u.INode = native.Uint32(rb.Next(4)) + u.Cookie[0] = native.Uint32(rb.Next(4)) + u.Cookie[1] = native.Uint32(rb.Next(4)) + return nil +} + +// SocketGet returns the Socket identified by its local and remote addresses. +func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) { + var protocol uint8 + var localIP, remoteIP net.IP + var localPort, remotePort uint16 + switch l := local.(type) { + case *net.TCPAddr: + r, ok := remote.(*net.TCPAddr) + if !ok { + return nil, ErrNotImplemented + } + localIP = l.IP + localPort = uint16(l.Port) + remoteIP = r.IP + remotePort = uint16(r.Port) + protocol = unix.IPPROTO_TCP + case *net.UDPAddr: + r, ok := remote.(*net.UDPAddr) + if !ok { + return nil, ErrNotImplemented + } + localIP = l.IP + localPort = uint16(l.Port) + remoteIP = r.IP + remotePort = uint16(r.Port) + protocol = unix.IPPROTO_UDP + default: + return nil, ErrNotImplemented + } + + var family uint8 + if localIP.To4() != nil && remoteIP.To4() != nil { + family = unix.AF_INET + } + + if family == 0 && localIP.To16() != nil && remoteIP.To16() != nil { + family = unix.AF_INET6 + } + + if family == 0 { + return nil, ErrNotImplemented + } + + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: protocol, + States: 0xffffffff, + ID: SocketID{ + SourcePort: localPort, + DestinationPort: remotePort, + Source: localIP, + Destination: remoteIP, + Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE}, + }, + }) + + msgs, err := req.Execute(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY) + if err != nil { + return nil, err + } + if len(msgs) == 0 { + return nil, errors.New("no message nor error from netlink") + } + if len(msgs) > 2 { + return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs)) + } + + sock := &Socket{} + if err := sock.deserialize(msgs[0]); err != nil { + return nil, err + } + return sock, nil +} + // SocketGet returns the Socket identified by its local and remote addresses. func SocketGet(local, remote net.Addr) (*Socket, error) { + return pkgHandle.SocketGet(local, remote) +} + +// SocketDestroy kills the Socket identified by its local and remote addresses. 
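// ---------------------------------------------------------------------------
// Editor's aside, not part of the vendored diff: SocketGet is no longer
// hardwired to TCP over IPv4; it now accepts *net.TCPAddr or *net.UDPAddr
// pairs for IPv4 or IPv6 and goes through the regular request path on a
// Handle. A lookup sketch with hypothetical addresses:
package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	local := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080}
	remote := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 52000}
	sock, err := netlink.SocketGet(local, remote)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d inode=%d\n", sock.UID, sock.INode)
}
// ---------------------------------------------------------------------------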
+func (h *Handle) SocketDestroy(local, remote net.Addr) error { localTCP, ok := local.(*net.TCPAddr) if !ok { - return nil, ErrNotImplemented + return ErrNotImplemented } remoteTCP, ok := remote.(*net.TCPAddr) if !ok { - return nil, ErrNotImplemented + return ErrNotImplemented } localIP := localTCP.IP.To4() if localIP == nil { - return nil, ErrNotImplemented + return ErrNotImplemented } remoteIP := remoteTCP.IP.To4() if remoteIP == nil { - return nil, ErrNotImplemented + return ErrNotImplemented } s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) if err != nil { - return nil, err + return err } defer s.Close() - req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0) + req := h.newNetlinkRequest(nl.SOCK_DESTROY, unix.NLM_F_ACK) req.AddData(&socketRequest{ Family: unix.AF_INET, Protocol: unix.IPPROTO_TCP, @@ -151,64 +272,81 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE}, }, }) - s.Send(req) - msgs, from, err := s.Receive() - if err != nil { - return nil, err - } - if from.Pid != nl.PidKernel { - return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) - } - if len(msgs) == 0 { - return nil, errors.New("no message nor error from netlink") - } - if len(msgs) > 2 { - return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs)) - } - sock := &Socket{} - if err := sock.deserialize(msgs[0].Data); err != nil { - return nil, err - } - return sock, nil + + _, err = req.Execute(unix.NETLINK_INET_DIAG, 0) + return err +} + +// SocketDestroy kills the Socket identified by its local and remote addresses. +func SocketDestroy(local, remote net.Addr) error { + return pkgHandle.SocketDestroy(local, remote) } // SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. -func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { +func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_TCP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result var result []*InetDiagTCPInfoResp - err := socketDiagTCPExecutor(family, func(m syscall.NetlinkMessage) error { + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err := sockInfo.deserialize(m.Data); err != nil { - return err + if err = sockInfo.deserialize(msg); err != nil { + return false } - attrs, err := nl.ParseRouteAttr(m.Data[sizeofSocket:]) - if err != nil { - return err + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false } - res, err := attrsToInetDiagTCPInfoResp(attrs, sockInfo) - if err != nil { - return err + var res *InetDiagTCPInfoResp + if res, err = attrsToInetDiagTCPInfoResp(attrs, sockInfo); err != nil { + return false } result = append(result, res) - return nil + return true }) + if err != nil { return nil, err } return result, nil } +// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. 
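// ---------------------------------------------------------------------------
// Editor's aside, not part of the vendored diff: the TCP diagnostics now run
// through req.ExecuteIter on a Handle instead of a hand-rolled receive loop,
// with package-level wrappers kept for compatibility. Dumping round-trip
// times for all IPv4 TCP sockets might look like this:
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	infos, err := netlink.SocketDiagTCPInfo(netlink.FAMILY_V4)
	if err != nil {
		panic(err)
	}
	for _, res := range infos {
		if res.TCPInfo == nil {
			continue
		}
		// TCPInfo.Rtt is reported by the kernel in microseconds.
		fmt.Printf("%v -> %v rtt=%dus\n",
			res.InetDiagMsg.ID.Source, res.InetDiagMsg.ID.Destination, res.TCPInfo.Rtt)
	}
}
// ---------------------------------------------------------------------------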
+func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { + return pkgHandle.SocketDiagTCPInfo(family) +} + // SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. -func SocketDiagTCP(family uint8) ([]*Socket, error) { +func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_TCP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result var result []*Socket - err := socketDiagTCPExecutor(family, func(m syscall.NetlinkMessage) error { + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err := sockInfo.deserialize(m.Data); err != nil { - return err + if err = sockInfo.deserialize(msg); err != nil { + return false } result = append(result, sockInfo) - return nil + return true }) if err != nil { return nil, err @@ -216,76 +354,237 @@ func SocketDiagTCP(family uint8) ([]*Socket, error) { return result, nil } -// socketDiagTCPExecutor requests INET_DIAG_INFO for TCP protocol for specified family type. -func socketDiagTCPExecutor(family uint8, receiver func(syscall.NetlinkMessage) error) error { - s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) +// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +func SocketDiagTCP(family uint8) ([]*Socket, error) { + return pkgHandle.SocketDiagTCP(family) +} + +// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { + // Construct the request + var extensions uint8 + extensions = 1 << (INET_DIAG_VEGASINFO - 1) + extensions |= 1 << (INET_DIAG_INFO - 1) + extensions |= 1 << (INET_DIAG_MEMINFO - 1) + + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_UDP, + Ext: extensions, + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*InetDiagUDPInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false + } + + var res *InetDiagUDPInfoResp + if res, err = attrsToInetDiagUDPInfoResp(attrs, sockInfo); err != nil { + return false + } + + result = append(result, res) + return true + }) if err != nil { - return err + return nil, err } - defer s.Close() + return result, nil +} - req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) +// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +func SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { + return pkgHandle.SocketDiagUDPInfo(family) +} + +// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. 
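// ---------------------------------------------------------------------------
// Editor's aside, not part of the vendored diff: the new UDP variant also
// requests INET_DIAG_MEMINFO, so each InetDiagUDPInfoResp can carry the
// socket memory counters (see the MemInfo type added to tcp.go further
// down). A dump sketch:
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	infos, err := netlink.SocketDiagUDPInfo(netlink.FAMILY_V4)
	if err != nil {
		panic(err)
	}
	for _, res := range infos {
		if res.Memory == nil {
			continue
		}
		fmt.Printf("port %d: rmem=%d wmem=%d\n",
			res.InetDiagMsg.ID.SourcePort, res.Memory.RMem, res.Memory.WMem)
	}
}
// ---------------------------------------------------------------------------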
+func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) req.AddData(&socketRequest{ Family: family, - Protocol: unix.IPPROTO_TCP, + Protocol: unix.IPPROTO_UDP, Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), - States: uint32(0xfff), // All TCP states + States: uint32(0xfff), // all states }) - s.Send(req) -loop: - for { - msgs, from, err := s.Receive() - if err != nil { - return err + // Do the query and parse the result + var result []*Socket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false } - if from.Pid != nl.PidKernel { - return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + result = append(result, sockInfo) + return true + }) + if err != nil { + return nil, err + } + return result, nil +} + +// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +func SocketDiagUDP(family uint8) ([]*Socket, error) { + return pkgHandle.SocketDiagUDP(family) +} + +// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { + // Construct the request + var extensions uint8 + extensions = 1 << UNIX_DIAG_NAME + extensions |= 1 << UNIX_DIAG_PEER + extensions |= 1 << UNIX_DIAG_RQLEN + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&unixSocketRequest{ + Family: unix.AF_UNIX, + States: ^uint32(0), // all states + Show: uint32(extensions), + }) + + var result []*UnixDiagInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &UnixSocket{} + if err = sockInfo.deserialize(msg); err != nil { + return false } - if len(msgs) == 0 { - return errors.New("no message nor error from netlink") + + // Diagnosis also delivers sockets with AF_INET family, filter those + if sockInfo.Family != unix.AF_UNIX { + return false } - for _, m := range msgs { - switch m.Header.Type { - case unix.NLMSG_DONE: - break loop - case unix.NLMSG_ERROR: - error := int32(native.Uint32(m.Data[0:4])) - return syscall.Errno(-error) - } - if err := receiver(m); err != nil { - return err - } + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false } + + var res *UnixDiagInfoResp + if res, err = attrsToUnixDiagInfoResp(attrs, sockInfo); err != nil { + return false + } + result = append(result, res) + return true + }) + if err != nil { + return nil, err } - return nil + return result, nil +} + +// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +func UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { + return pkgHandle.UnixSocketDiagInfo() +} + +// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. 
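// ---------------------------------------------------------------------------
// Editor's aside, not part of the vendored diff: UNIX-domain sockets get
// their own dump. Note that the callback above filters out the AF_INET
// entries the kernel mixes into the reply, and that Name, Peer and Queue in
// UnixDiagInfoResp are pointers, nil whenever the kernel omits the
// attribute. For example:
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	infos, err := netlink.UnixSocketDiagInfo()
	if err != nil {
		panic(err)
	}
	for _, res := range infos {
		name := "(unnamed)"
		if res.Name != nil {
			name = *res.Name
		}
		fmt.Printf("inode=%d name=%s\n", res.DiagMsg.INode, name)
	}
}
// ---------------------------------------------------------------------------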
+func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&unixSocketRequest{ + Family: unix.AF_UNIX, + States: ^uint32(0), // all states + }) + + var result []*UnixSocket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &UnixSocket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + // Diagnosis also delivers sockets with AF_INET family, filter those + if sockInfo.Family == unix.AF_UNIX { + result = append(result, sockInfo) + } + return true + }) + if err != nil { + return nil, err + } + return result, nil +} + +// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. +func UnixSocketDiag() ([]*UnixSocket, error) { + return pkgHandle.UnixSocketDiag() } func attrsToInetDiagTCPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagTCPInfoResp, error) { - var tcpInfo *TCPInfo - var tcpBBRInfo *TCPBBRInfo + info := &InetDiagTCPInfoResp{ + InetDiagMsg: sockInfo, + } for _, a := range attrs { - if a.Attr.Type == INET_DIAG_INFO { - tcpInfo = &TCPInfo{} - if err := tcpInfo.deserialize(a.Value); err != nil { + switch a.Attr.Type { + case INET_DIAG_INFO: + info.TCPInfo = &TCPInfo{} + if err := info.TCPInfo.deserialize(a.Value); err != nil { + return nil, err + } + case INET_DIAG_BBRINFO: + info.TCPBBRInfo = &TCPBBRInfo{} + if err := info.TCPBBRInfo.deserialize(a.Value); err != nil { return nil, err } - continue } + } - if a.Attr.Type == INET_DIAG_BBRINFO { - tcpBBRInfo = &TCPBBRInfo{} - if err := tcpBBRInfo.deserialize(a.Value); err != nil { + return info, nil +} + +func attrsToInetDiagUDPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagUDPInfoResp, error) { + info := &InetDiagUDPInfoResp{ + InetDiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case INET_DIAG_MEMINFO: + info.Memory = &MemInfo{} + if err := info.Memory.deserialize(a.Value); err != nil { return nil, err } - continue } } - return &InetDiagTCPInfoResp{ - InetDiagMsg: sockInfo, - TCPInfo: tcpInfo, - TCPBBRInfo: tcpBBRInfo, - }, nil + return info, nil +} + +func attrsToUnixDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *UnixSocket) (*UnixDiagInfoResp, error) { + info := &UnixDiagInfoResp{ + DiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case UNIX_DIAG_NAME: + name := string(a.Value[:a.Attr.Len]) + info.Name = &name + case UNIX_DIAG_PEER: + peer := native.Uint32(a.Value) + info.Peer = &peer + case UNIX_DIAG_RQLEN: + info.Queue = &QueueInfo{ + RQueue: native.Uint32(a.Value[:4]), + WQueue: native.Uint32(a.Value[4:]), + } + // default: + // fmt.Println("unknown unix attribute type", a.Attr.Type, "with data", a.Value) + } + } + + return info, nil } diff --git a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go new file mode 100644 index 000000000..20c82f9c7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go @@ -0,0 +1,195 @@ +package netlink + +import ( + "errors" + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +const ( + sizeofXDPSocketRequest = 1 + 1 + 2 + 4 + 4 + 2*4 + sizeofXDPSocket = 0x10 +) + +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L12 +type xdpSocketRequest struct { + Family uint8 + Protocol uint8 + pad uint16 + Ino uint32 + Show uint32 + Cookie 
[2]uint32 +} + +func (r *xdpSocketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + native.PutUint16(b.Next(2), r.pad) + native.PutUint32(b.Next(4), r.Ino) + native.PutUint32(b.Next(4), r.Show) + native.PutUint32(b.Next(4), r.Cookie[0]) + native.PutUint32(b.Next(4), r.Cookie[1]) + return b.Bytes +} + +func (r *xdpSocketRequest) Len() int { return sizeofXDPSocketRequest } + +func (s *XDPSocket) deserialize(b []byte) error { + if len(b) < sizeofXDPSocket { + return fmt.Errorf("XDP socket data short read (%d); want %d", len(b), sizeofXDPSocket) + } + rb := readBuffer{Bytes: b} + s.Family = rb.Read() + s.Type = rb.Read() + s.pad = native.Uint16(rb.Next(2)) + s.Ino = native.Uint32(rb.Next(4)) + s.Cookie[0] = native.Uint32(rb.Next(4)) + s.Cookie[1] = native.Uint32(rb.Next(4)) + return nil +} + +// XDPSocketGet returns the XDP socket identified by its inode number and/or +// socket cookie. Specify the cookie as SOCK_ANY_COOKIE if +func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) { + // We have a problem here: dumping AF_XDP sockets currently does not support + // filtering. We thus need to dump all XSKs and then only filter afterwards + // :( + xsks, err := SocketDiagXDP() + if err != nil { + return nil, err + } + checkCookie := cookie != SOCK_ANY_COOKIE && cookie != 0 + crumblingCookie := [2]uint32{uint32(cookie), uint32(cookie >> 32)} + checkIno := ino != 0 + var xskinfo *XDPDiagInfoResp + for _, xsk := range xsks { + if checkIno && xsk.XDPDiagMsg.Ino != ino { + continue + } + if checkCookie && xsk.XDPDiagMsg.Cookie != crumblingCookie { + continue + } + if xskinfo != nil { + return nil, errors.New("multiple matching XDP sockets") + } + xskinfo = xsk + } + if xskinfo == nil { + return nil, errors.New("no matching XDP socket") + } + return xskinfo, nil +} + +// SocketDiagXDP requests XDP_DIAG_INFO for XDP family sockets. +func SocketDiagXDP() ([]*XDPDiagInfoResp, error) { + var result []*XDPDiagInfoResp + err := socketDiagXDPExecutor(func(m syscall.NetlinkMessage) error { + sockInfo := &XDPSocket{} + if err := sockInfo.deserialize(m.Data); err != nil { + return err + } + attrs, err := nl.ParseRouteAttr(m.Data[sizeofXDPSocket:]) + if err != nil { + return err + } + + res, err := attrsToXDPDiagInfoResp(attrs, sockInfo) + if err != nil { + return err + } + + result = append(result, res) + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// socketDiagXDPExecutor requests XDP_DIAG_INFO for XDP family sockets. 
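// ---------------------------------------------------------------------------
// Editor's aside, not part of the vendored diff: AF_XDP sockets cannot be
// filtered kernel-side, so SocketXDPGetInfo dumps every XSK and matches
// inode and cookie afterwards; pass SOCK_ANY_COOKIE (defined in the new
// xdp_diag.go below) to match on the inode alone. The inode value here is
// hypothetical, obtained elsewhere (e.g. from a socket fd):
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	const ino = uint32(123456) // inode of an AF_XDP socket
	info, err := netlink.SocketXDPGetInfo(ino, netlink.SOCK_ANY_COOKIE)
	if err != nil {
		panic(err)
	}
	fmt.Printf("ifindex=%d queue=%d\n", info.XDPInfo.Ifindex, info.XDPInfo.QueueID)
}
// ---------------------------------------------------------------------------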
+func socketDiagXDPExecutor(receiver func(syscall.NetlinkMessage) error) error { + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + if err != nil { + return err + } + defer s.Close() + + req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&xdpSocketRequest{ + Family: unix.AF_XDP, + Show: XDP_SHOW_INFO | XDP_SHOW_RING_CFG | XDP_SHOW_UMEM | XDP_SHOW_STATS, + }) + if err := s.Send(req); err != nil { + return err + } + +loop: + for { + msgs, from, err := s.Receive() + if err != nil { + return err + } + if from.Pid != nl.PidKernel { + return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + } + if len(msgs) == 0 { + return errors.New("no message nor error from netlink") + } + + for _, m := range msgs { + switch m.Header.Type { + case unix.NLMSG_DONE: + break loop + case unix.NLMSG_ERROR: + error := int32(native.Uint32(m.Data[0:4])) + return syscall.Errno(-error) + } + if err := receiver(m); err != nil { + return err + } + } + } + return nil +} + +func attrsToXDPDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *XDPSocket) (*XDPDiagInfoResp, error) { + resp := &XDPDiagInfoResp{ + XDPDiagMsg: sockInfo, + XDPInfo: &XDPInfo{}, + } + for _, a := range attrs { + switch a.Attr.Type { + case XDP_DIAG_INFO: + resp.XDPInfo.Ifindex = native.Uint32(a.Value[0:4]) + resp.XDPInfo.QueueID = native.Uint32(a.Value[4:8]) + case XDP_DIAG_UID: + resp.XDPInfo.UID = native.Uint32(a.Value[0:4]) + case XDP_DIAG_RX_RING: + resp.XDPInfo.RxRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_TX_RING: + resp.XDPInfo.TxRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM_FILL_RING: + resp.XDPInfo.UmemFillRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM_COMPLETION_RING: + resp.XDPInfo.UmemCompletionRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM: + umem := &XDPDiagUmem{} + if err := umem.deserialize(a.Value); err != nil { + return nil, err + } + resp.XDPInfo.Umem = umem + case XDP_DIAG_STATS: + stats := &XDPDiagStats{} + if err := stats.deserialize(a.Value); err != nil { + return nil, err + } + resp.XDPInfo.Stats = stats + } + } + return resp, nil +} diff --git a/vendor/github.com/vishvananda/netlink/tcp.go b/vendor/github.com/vishvananda/netlink/tcp.go index 23ca014d4..43f80a0fc 100644 --- a/vendor/github.com/vishvananda/netlink/tcp.go +++ b/vendor/github.com/vishvananda/netlink/tcp.go @@ -82,3 +82,11 @@ type TCPBBRInfo struct { BBRPacingGain uint32 BBRCwndGain uint32 } + +// According to https://man7.org/linux/man-pages/man7/sock_diag.7.html +type MemInfo struct { + RMem uint32 + WMem uint32 + FMem uint32 + TMem uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/tcp_linux.go b/vendor/github.com/vishvananda/netlink/tcp_linux.go index 293858738..e98036da5 100644 --- a/vendor/github.com/vishvananda/netlink/tcp_linux.go +++ b/vendor/github.com/vishvananda/netlink/tcp_linux.go @@ -8,6 +8,7 @@ import ( const ( tcpBBRInfoLen = 20 + memInfoLen = 16 ) func checkDeserErr(err error) error { @@ -351,3 +352,17 @@ func (t *TCPBBRInfo) deserialize(b []byte) error { return nil } + +func (m *MemInfo) deserialize(b []byte) error { + if len(b) != memInfoLen { + return errors.New("Invalid length") + } + + rb := bytes.NewBuffer(b) + m.RMem = native.Uint32(rb.Next(4)) + m.WMem = native.Uint32(rb.Next(4)) + m.FMem = native.Uint32(rb.Next(4)) + m.TMem = native.Uint32(rb.Next(4)) + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/unix_diag.go b/vendor/github.com/vishvananda/netlink/unix_diag.go new file mode 100644 
index 000000000..d81776f36 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/unix_diag.go @@ -0,0 +1,27 @@ +package netlink + +// According to linux/include/uapi/linux/unix_diag.h +const ( + UNIX_DIAG_NAME = iota + UNIX_DIAG_VFS + UNIX_DIAG_PEER + UNIX_DIAG_ICONS + UNIX_DIAG_RQLEN + UNIX_DIAG_MEMINFO + UNIX_DIAG_SHUTDOWN + UNIX_DIAG_UID + UNIX_DIAG_MAX +) + +type UnixDiagInfoResp struct { + DiagMsg *UnixSocket + Name *string + Peer *uint32 + Queue *QueueInfo + Shutdown *uint8 +} + +type QueueInfo struct { + RQueue uint32 + WQueue uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/vdpa_linux.go new file mode 100644 index 000000000..7c15986d0 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/vdpa_linux.go @@ -0,0 +1,463 @@ +package netlink + +import ( + "fmt" + "net" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/vishvananda/netlink/nl" +) + +type vdpaDevID struct { + Name string + ID uint32 +} + +// VDPADev contains info about VDPA device +type VDPADev struct { + vdpaDevID + VendorID uint32 + MaxVQS uint32 + MaxVQSize uint16 + MinVQSize uint16 +} + +// VDPADevConfig contains configuration of the VDPA device +type VDPADevConfig struct { + vdpaDevID + Features uint64 + NegotiatedFeatures uint64 + Net VDPADevConfigNet +} + +// VDPADevVStats conatins vStats for the VDPA device +type VDPADevVStats struct { + vdpaDevID + QueueIndex uint32 + Vendor []VDPADevVStatsVendor + NegotiatedFeatures uint64 +} + +// VDPADevVStatsVendor conatins name and value for vendor specific vstat option +type VDPADevVStatsVendor struct { + Name string + Value uint64 +} + +// VDPADevConfigNet conatins status and net config for the VDPA device +type VDPADevConfigNet struct { + Status VDPADevConfigNetStatus + Cfg VDPADevConfigNetCfg +} + +// VDPADevConfigNetStatus contains info about net status +type VDPADevConfigNetStatus struct { + LinkUp bool + Announce bool +} + +// VDPADevConfigNetCfg contains net config for the VDPA device +type VDPADevConfigNetCfg struct { + MACAddr net.HardwareAddr + MaxVQP uint16 + MTU uint16 +} + +// VDPAMGMTDev conatins info about VDPA management device +type VDPAMGMTDev struct { + BusName string + DevName string + SupportedClasses uint64 + SupportedFeatures uint64 + MaxVQS uint32 +} + +// VDPANewDevParams contains parameters for new VDPA device +// use SetBits to configure requried features for the device +// example: +// +// VDPANewDevParams{Features: SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR)} +type VDPANewDevParams struct { + MACAddr net.HardwareAddr + MaxVQP uint16 + MTU uint16 + Features uint64 +} + +// SetBits set provided bits in the uint64 input value +// usage example: +// features := SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR) +func SetBits(input uint64, pos ...int) uint64 { + for _, p := range pos { + input |= 1 << uint64(p) + } + return input +} + +// IsBitSet check if specific bit is set in the uint64 input value +// usage example: +// hasNetClass := IsBitSet(mgmtDev, VIRTIO_ID_NET) +func IsBitSet(input uint64, pos int) bool { + val := input & (1 << uint64(pos)) + return val > 0 +} + +// VDPANewDev adds new VDPA device +// Equivalent to: `vdpa dev add name mgmtdev /mgmtName [params]` +func VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error { + return pkgHandle.VDPANewDev(name, mgmtBus, mgmtName, params) +} + +// VDPADelDev removes VDPA device +// Equivalent to: `vdpa dev del ` +func VDPADelDev(name string) error { + return 
pkgHandle.VDPADelDev(name) +} + +// VDPAGetDevList returns list of VDPA devices +// Equivalent to: `vdpa dev show` +func VDPAGetDevList() ([]*VDPADev, error) { + return pkgHandle.VDPAGetDevList() +} + +// VDPAGetDevByName returns VDPA device selected by name +// Equivalent to: `vdpa dev show ` +func VDPAGetDevByName(name string) (*VDPADev, error) { + return pkgHandle.VDPAGetDevByName(name) +} + +// VDPAGetDevConfigList returns list of VDPA devices configurations +// Equivalent to: `vdpa dev config show` +func VDPAGetDevConfigList() ([]*VDPADevConfig, error) { + return pkgHandle.VDPAGetDevConfigList() +} + +// VDPAGetDevConfigByName returns VDPA device configuration selected by name +// Equivalent to: `vdpa dev config show ` +func VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) { + return pkgHandle.VDPAGetDevConfigByName(name) +} + +// VDPAGetDevVStats returns vstats for VDPA device +// Equivalent to: `vdpa dev vstats show qidx ` +func VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) { + return pkgHandle.VDPAGetDevVStats(name, queueIndex) +} + +// VDPAGetMGMTDevList returns list of mgmt devices +// Equivalent to: `vdpa mgmtdev show` +func VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { + return pkgHandle.VDPAGetMGMTDevList() +} + +// VDPAGetMGMTDevByBusAndName returns mgmt devices selected by bus and name +// Equivalent to: `vdpa mgmtdev show /` +func VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) { + return pkgHandle.VDPAGetMGMTDevByBusAndName(bus, name) +} + +type vdpaNetlinkMessage []syscall.NetlinkRouteAttr + +func (id *vdpaDevID) parseIDAttribute(attr syscall.NetlinkRouteAttr) { + switch attr.Attr.Type { + case nl.VDPA_ATTR_DEV_NAME: + id.Name = nl.BytesToString(attr.Value) + case nl.VDPA_ATTR_DEV_ID: + id.ID = native.Uint32(attr.Value) + } +} + +func (netStatus *VDPADevConfigNetStatus) parseStatusAttribute(value []byte) { + a := native.Uint16(value) + netStatus.Announce = (a & VIRTIO_NET_S_ANNOUNCE) > 0 + netStatus.LinkUp = (a & VIRTIO_NET_S_LINK_UP) > 0 +} + +func (d *VDPADev) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + d.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_VENDOR_ID: + d.VendorID = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_MAX_VQS: + d.MaxVQS = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_MAX_VQ_SIZE: + d.MaxVQSize = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_MIN_VQ_SIZE: + d.MinVQSize = native.Uint16(a.Value) + } + } +} + +func (c *VDPADevConfig) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + c.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_NET_CFG_MACADDR: + c.Net.Cfg.MACAddr = a.Value + case nl.VDPA_ATTR_DEV_NET_STATUS: + c.Net.Status.parseStatusAttribute(a.Value) + case nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP: + c.Net.Cfg.MaxVQP = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_NET_CFG_MTU: + c.Net.Cfg.MTU = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_FEATURES: + c.Features = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES: + c.NegotiatedFeatures = native.Uint64(a.Value) + } + } +} + +func (s *VDPADevVStats) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + s.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_QUEUE_INDEX: + s.QueueIndex = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_VENDOR_ATTR_NAME: + s.Vendor = append(s.Vendor, VDPADevVStatsVendor{Name: nl.BytesToString(a.Value)}) + case nl.VDPA_ATTR_DEV_VENDOR_ATTR_VALUE: + if len(s.Vendor) == 0 { + 
break + } + s.Vendor[len(s.Vendor)-1].Value = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES: + s.NegotiatedFeatures = native.Uint64(a.Value) + } + } +} + +func (d *VDPAMGMTDev) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + switch a.Attr.Type { + case nl.VDPA_ATTR_MGMTDEV_BUS_NAME: + d.BusName = nl.BytesToString(a.Value) + case nl.VDPA_ATTR_MGMTDEV_DEV_NAME: + d.DevName = nl.BytesToString(a.Value) + case nl.VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES: + d.SupportedClasses = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_SUPPORTED_FEATURES: + d.SupportedFeatures = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_MGMTDEV_MAX_VQS: + d.MaxVQS = native.Uint32(a.Value) + } + } +} + +func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) ([]vdpaNetlinkMessage, error) { + f, err := h.GenlFamilyGet(nl.VDPA_GENL_NAME) + if err != nil { + return nil, err + } + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_ACK|extraFlags) + req.AddData(&nl.Genlmsg{ + Command: command, + Version: nl.VDPA_GENL_VERSION, + }) + for _, a := range attrs { + req.AddData(a) + } + + resp, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + messages := make([]vdpaNetlinkMessage, 0, len(resp)) + for _, m := range resp { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + messages = append(messages, attrs) + } + return messages, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev))) + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + devs := make([]*VDPADev, 0, len(messages)) + for _, m := range messages { + d := &VDPADev{} + d.parseAttributes(m) + devs = append(devs, d) + } + return devs, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev))) + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + cfgs := make([]*VDPADevConfig, 0, len(messages)) + for _, m := range messages { + cfg := &VDPADevConfig{} + cfg.parseAttributes(m) + cfgs = append(cfgs, cfg) + } + return cfgs, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(*dev)), + ) + if bus != nil { + attrs = append(attrs, + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(*bus)), + ) + } + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + cfgs := make([]*VDPAMGMTDev, 0, len(messages)) + for _, m := range messages { + cfg := &VDPAMGMTDev{} + cfg.parseAttributes(m) + cfgs = append(cfgs, cfg) + } + return cfgs, nil +} + +// VDPANewDev adds new VDPA device +// Equivalent to: `vdpa dev add name mgmtdev /mgmtName [params]` +func (h 
*Handle) VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error { + attrs := []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)), + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(mgmtName)), + } + if mgmtBus != "" { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(mgmtBus))) + } + if len(params.MACAddr) != 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MACADDR, params.MACAddr)) + } + if params.MaxVQP > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP, nl.Uint16Attr(params.MaxVQP))) + } + if params.MTU > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MTU, nl.Uint16Attr(params.MTU))) + } + if params.Features > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_FEATURES, nl.Uint64Attr(params.Features))) + } + _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_NEW, 0, attrs) + return err +} + +// VDPADelDev removes VDPA device +// Equivalent to: `vdpa dev del ` +func (h *Handle) VDPADelDev(name string) error { + _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_DEL, 0, []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name))}) + return err +} + +// VDPAGetDevList returns list of VDPA devices +// Equivalent to: `vdpa dev show` +func (h *Handle) VDPAGetDevList() ([]*VDPADev, error) { + return h.vdpaDevGet(nil) +} + +// VDPAGetDevByName returns VDPA device selected by name +// Equivalent to: `vdpa dev show ` +func (h *Handle) VDPAGetDevByName(name string) (*VDPADev, error) { + devs, err := h.vdpaDevGet(&name) + if err != nil { + return nil, err + } + if len(devs) == 0 { + return nil, fmt.Errorf("device not found") + } + return devs[0], nil +} + +// VDPAGetDevConfigList returns list of VDPA devices configurations +// Equivalent to: `vdpa dev config show` +func (h *Handle) VDPAGetDevConfigList() ([]*VDPADevConfig, error) { + return h.vdpaDevConfigGet(nil) +} + +// VDPAGetDevConfigByName returns VDPA device configuration selected by name +// Equivalent to: `vdpa dev config show ` +func (h *Handle) VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) { + cfgs, err := h.vdpaDevConfigGet(&name) + if err != nil { + return nil, err + } + if len(cfgs) == 0 { + return nil, fmt.Errorf("configuration not found") + } + return cfgs[0], nil +} + +// VDPAGetDevVStats returns vstats for VDPA device +// Equivalent to: `vdpa dev vstats show qidx ` +func (h *Handle) VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) { + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_VSTATS_GET, 0, []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)), + nl.NewRtAttr(nl.VDPA_ATTR_DEV_QUEUE_INDEX, nl.Uint32Attr(queueIndex)), + }) + if err != nil { + return nil, err + } + if len(messages) == 0 { + return nil, fmt.Errorf("stats not found") + } + stats := &VDPADevVStats{} + stats.parseAttributes(messages[0]) + return stats, nil +} + +// VDPAGetMGMTDevList returns list of mgmt devices +// Equivalent to: `vdpa mgmtdev show` +func (h *Handle) VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { + return h.vdpaMGMTDevGet(nil, nil) +} + +// VDPAGetMGMTDevByBusAndName returns mgmt devices selected by bus and name +// Equivalent to: `vdpa mgmtdev show /` +func (h *Handle) VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) { + var busPtr *string + if bus != "" { + busPtr = &bus + } + devs, err := h.vdpaMGMTDevGet(busPtr, &name) + if err != nil { + return nil, err + } + if len(devs) == 0 { + return nil, fmt.Errorf("mgmtdev 
not found") + } + return devs[0], nil +} diff --git a/vendor/github.com/vishvananda/netlink/virtio.go b/vendor/github.com/vishvananda/netlink/virtio.go new file mode 100644 index 000000000..78a497bbc --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/virtio.go @@ -0,0 +1,132 @@ +package netlink + +// features for virtio net +const ( + VIRTIO_NET_F_CSUM = 0 // Host handles pkts w/ partial csum + VIRTIO_NET_F_GUEST_CSUM = 1 // Guest handles pkts w/ partial csum + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS = 2 // Dynamic offload configuration. + VIRTIO_NET_F_MTU = 3 // Initial MTU advice + VIRTIO_NET_F_MAC = 5 // Host has given MAC address. + VIRTIO_NET_F_GUEST_TSO4 = 7 // Guest can handle TSOv4 in. + VIRTIO_NET_F_GUEST_TSO6 = 8 // Guest can handle TSOv6 in. + VIRTIO_NET_F_GUEST_ECN = 9 // Guest can handle TSO[6] w/ ECN in. + VIRTIO_NET_F_GUEST_UFO = 10 // Guest can handle UFO in. + VIRTIO_NET_F_HOST_TSO4 = 11 // Host can handle TSOv4 in. + VIRTIO_NET_F_HOST_TSO6 = 12 // Host can handle TSOv6 in. + VIRTIO_NET_F_HOST_ECN = 13 // Host can handle TSO[6] w/ ECN in. + VIRTIO_NET_F_HOST_UFO = 14 // Host can handle UFO in. + VIRTIO_NET_F_MRG_RXBUF = 15 // Host can merge receive buffers. + VIRTIO_NET_F_STATUS = 16 // virtio_net_config.status available + VIRTIO_NET_F_CTRL_VQ = 17 // Control channel available + VIRTIO_NET_F_CTRL_RX = 18 // Control channel RX mode support + VIRTIO_NET_F_CTRL_VLAN = 19 // Control channel VLAN filtering + VIRTIO_NET_F_CTRL_RX_EXTRA = 20 // Extra RX mode control support + VIRTIO_NET_F_GUEST_ANNOUNCE = 21 // Guest can announce device on the* network + VIRTIO_NET_F_MQ = 22 // Device supports Receive Flow Steering + VIRTIO_NET_F_CTRL_MAC_ADDR = 23 // Set MAC address + VIRTIO_NET_F_VQ_NOTF_COAL = 52 // Device supports virtqueue notification coalescing + VIRTIO_NET_F_NOTF_COAL = 53 // Device supports notifications coalescing + VIRTIO_NET_F_GUEST_USO4 = 54 // Guest can handle USOv4 in. + VIRTIO_NET_F_GUEST_USO6 = 55 // Guest can handle USOv6 in. + VIRTIO_NET_F_HOST_USO = 56 // Host can handle USO in. + VIRTIO_NET_F_HASH_REPORT = 57 // Supports hash report + VIRTIO_NET_F_GUEST_HDRLEN = 59 // Guest provides the exact hdr_len value. + VIRTIO_NET_F_RSS = 60 // Supports RSS RX steering + VIRTIO_NET_F_RSC_EXT = 61 // extended coalescing info + VIRTIO_NET_F_STANDBY = 62 // Act as standby for another device with the same MAC. + VIRTIO_NET_F_SPEED_DUPLEX = 63 // Device set linkspeed and duplex + VIRTIO_NET_F_GSO = 6 // Host handles pkts any GSO type +) + +// virtio net status +const ( + VIRTIO_NET_S_LINK_UP = 1 // Link is up + VIRTIO_NET_S_ANNOUNCE = 2 // Announcement is needed +) + +// virtio config +const ( + // Do we get callbacks when the ring is completely used, even if we've + // suppressed them? + VIRTIO_F_NOTIFY_ON_EMPTY = 24 + // Can the device handle any descriptor layout? + VIRTIO_F_ANY_LAYOUT = 27 + // v1.0 compliant + VIRTIO_F_VERSION_1 = 32 + // If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature. + // If set - use platform DMA tools to access the memory. + // Note the reverse polarity (compared to most other features), + // this is for compatibility with legacy systems. + VIRTIO_F_ACCESS_PLATFORM = 33 + // Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) + VIRTIO_F_IOMMU_PLATFORM = VIRTIO_F_ACCESS_PLATFORM + // This feature indicates support for the packed virtqueue layout. 
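// ---------------------------------------------------------------------------
// Editor's aside, not part of the vendored diff: the vdpa_linux.go API added
// above mirrors the `vdpa` CLI, and the virtio-net feature constants in this
// file are meant to be packed into a uint64 via SetBits. A creation sketch
// with hypothetical device and management-device names:
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	params := netlink.VDPANewDevParams{
		MTU:      1500,
		Features: netlink.SetBits(0, netlink.VIRTIO_NET_F_MTU, netlink.VIRTIO_NET_F_MAC),
	}
	// Roughly: vdpa dev add name vdpa0 mgmtdev pci/0000:65:00.2 mtu 1500
	if err := netlink.VDPANewDev("vdpa0", "pci", "0000:65:00.2", params); err != nil {
		panic(err)
	}
	cfg, err := netlink.VDPAGetDevConfigByName("vdpa0")
	if err != nil {
		panic(err)
	}
	fmt.Printf("mtu=%d link up=%v\n", cfg.Net.Cfg.MTU, cfg.Net.Status.LinkUp)
}
// ---------------------------------------------------------------------------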
+ VIRTIO_F_RING_PACKED = 34 + // Inorder feature indicates that all buffers are used by the device + // in the same order in which they have been made available. + VIRTIO_F_IN_ORDER = 35 + // This feature indicates that memory accesses by the driver and the + // device are ordered in a way described by the platform. + VIRTIO_F_ORDER_PLATFORM = 36 + // Does the device support Single Root I/O Virtualization? + VIRTIO_F_SR_IOV = 37 + // This feature indicates that the driver passes extra data (besides + // identifying the virtqueue) in its device notifications. + VIRTIO_F_NOTIFICATION_DATA = 38 + // This feature indicates that the driver uses the data provided by the device + // as a virtqueue identifier in available buffer notifications. + VIRTIO_F_NOTIF_CONFIG_DATA = 39 + // This feature indicates that the driver can reset a queue individually. + VIRTIO_F_RING_RESET = 40 +) + +// virtio device ids +const ( + VIRTIO_ID_NET = 1 // virtio net + VIRTIO_ID_BLOCK = 2 // virtio block + VIRTIO_ID_CONSOLE = 3 // virtio console + VIRTIO_ID_RNG = 4 // virtio rng + VIRTIO_ID_BALLOON = 5 // virtio balloon + VIRTIO_ID_IOMEM = 6 // virtio ioMemory + VIRTIO_ID_RPMSG = 7 // virtio remote processor messaging + VIRTIO_ID_SCSI = 8 // virtio scsi + VIRTIO_ID_9P = 9 // 9p virtio console + VIRTIO_ID_MAC80211_WLAN = 10 // virtio WLAN MAC + VIRTIO_ID_RPROC_SERIAL = 11 // virtio remoteproc serial link + VIRTIO_ID_CAIF = 12 // Virtio caif + VIRTIO_ID_MEMORY_BALLOON = 13 // virtio memory balloon + VIRTIO_ID_GPU = 16 // virtio GPU + VIRTIO_ID_CLOCK = 17 // virtio clock/timer + VIRTIO_ID_INPUT = 18 // virtio input + VIRTIO_ID_VSOCK = 19 // virtio vsock transport + VIRTIO_ID_CRYPTO = 20 // virtio crypto + VIRTIO_ID_SIGNAL_DIST = 21 // virtio signal distribution device + VIRTIO_ID_PSTORE = 22 // virtio pstore device + VIRTIO_ID_IOMMU = 23 // virtio IOMMU + VIRTIO_ID_MEM = 24 // virtio mem + VIRTIO_ID_SOUND = 25 // virtio sound + VIRTIO_ID_FS = 26 // virtio filesystem + VIRTIO_ID_PMEM = 27 // virtio pmem + VIRTIO_ID_RPMB = 28 // virtio rpmb + VIRTIO_ID_MAC80211_HWSIM = 29 // virtio mac80211-hwsim + VIRTIO_ID_VIDEO_ENCODER = 30 // virtio video encoder + VIRTIO_ID_VIDEO_DECODER = 31 // virtio video decoder + VIRTIO_ID_SCMI = 32 // virtio SCMI + VIRTIO_ID_NITRO_SEC_MOD = 33 // virtio nitro secure module + VIRTIO_ID_I2C_ADAPTER = 34 // virtio i2c adapter + VIRTIO_ID_WATCHDOG = 35 // virtio watchdog + VIRTIO_ID_CAN = 36 // virtio can + VIRTIO_ID_DMABUF = 37 // virtio dmabuf + VIRTIO_ID_PARAM_SERV = 38 // virtio parameter server + VIRTIO_ID_AUDIO_POLICY = 39 // virtio audio policy + VIRTIO_ID_BT = 40 // virtio bluetooth + VIRTIO_ID_GPIO = 41 // virtio gpio + // Virtio Transitional IDs + VIRTIO_TRANS_ID_NET = 0x1000 // transitional virtio net + VIRTIO_TRANS_ID_BLOCK = 0x1001 // transitional virtio block + VIRTIO_TRANS_ID_BALLOON = 0x1002 // transitional virtio balloon + VIRTIO_TRANS_ID_CONSOLE = 0x1003 // transitional virtio console + VIRTIO_TRANS_ID_SCSI = 0x1004 // transitional virtio SCSI + VIRTIO_TRANS_ID_RNG = 0x1005 // transitional virtio rng + VIRTIO_TRANS_ID_9P = 0x1009 // transitional virtio 9p console +) diff --git a/vendor/github.com/vishvananda/netlink/xdp_diag.go b/vendor/github.com/vishvananda/netlink/xdp_diag.go new file mode 100644 index 000000000..e88825bf5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xdp_diag.go @@ -0,0 +1,34 @@ +package netlink + +import "github.com/vishvananda/netlink/nl" + +const SOCK_ANY_COOKIE = uint64(nl.TCPDIAG_NOCOOKIE)<<32 + uint64(nl.TCPDIAG_NOCOOKIE) + +// XDP diagnosis 
show flag constants to request particular information elements. +const ( + XDP_SHOW_INFO = 1 << iota + XDP_SHOW_RING_CFG + XDP_SHOW_UMEM + XDP_SHOW_MEMINFO + XDP_SHOW_STATS +) + +// XDP diag element constants +const ( + XDP_DIAG_NONE = iota + XDP_DIAG_INFO // when using XDP_SHOW_INFO + XDP_DIAG_UID // when using XDP_SHOW_INFO + XDP_DIAG_RX_RING // when using XDP_SHOW_RING_CFG + XDP_DIAG_TX_RING // when using XDP_SHOW_RING_CFG + XDP_DIAG_UMEM // when using XDP_SHOW_UMEM + XDP_DIAG_UMEM_FILL_RING // when using XDP_SHOW_UMEM + XDP_DIAG_UMEM_COMPLETION_RING // when using XDP_SHOW_UMEM + XDP_DIAG_MEMINFO // when using XDP_SHOW_MEMINFO + XDP_DIAG_STATS // when using XDP_SHOW_STATS +) + +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21 +type XDPDiagInfoResp struct { + XDPDiagMsg *XDPSocket + XDPInfo *XDPInfo +} diff --git a/vendor/github.com/vishvananda/netlink/xdp_linux.go b/vendor/github.com/vishvananda/netlink/xdp_linux.go new file mode 100644 index 000000000..896a406de --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xdp_linux.go @@ -0,0 +1,46 @@ +package netlink + +import ( + "bytes" + "fmt" +) + +const ( + xdrDiagUmemLen = 8 + 8*4 + xdrDiagStatsLen = 6 * 8 +) + +func (x *XDPDiagUmem) deserialize(b []byte) error { + if len(b) < xdrDiagUmemLen { + return fmt.Errorf("XDP umem diagnosis data short read (%d); want %d", len(b), xdrDiagUmemLen) + } + + rb := bytes.NewBuffer(b) + x.Size = native.Uint64(rb.Next(8)) + x.ID = native.Uint32(rb.Next(4)) + x.NumPages = native.Uint32(rb.Next(4)) + x.ChunkSize = native.Uint32(rb.Next(4)) + x.Headroom = native.Uint32(rb.Next(4)) + x.Ifindex = native.Uint32(rb.Next(4)) + x.QueueID = native.Uint32(rb.Next(4)) + x.Flags = native.Uint32(rb.Next(4)) + x.Refs = native.Uint32(rb.Next(4)) + + return nil +} + +func (x *XDPDiagStats) deserialize(b []byte) error { + if len(b) < xdrDiagStatsLen { + return fmt.Errorf("XDP diagnosis statistics short read (%d); want %d", len(b), xdrDiagStatsLen) + } + + rb := bytes.NewBuffer(b) + x.RxDropped = native.Uint64(rb.Next(8)) + x.RxInvalid = native.Uint64(rb.Next(8)) + x.RxFull = native.Uint64(rb.Next(8)) + x.FillRingEmpty = native.Uint64(rb.Next(8)) + x.TxInvalid = native.Uint64(rb.Next(8)) + x.TxRingEmpty = native.Uint64(rb.Next(8)) + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm.go b/vendor/github.com/vishvananda/netlink/xfrm_linux.go similarity index 95% rename from vendor/github.com/vishvananda/netlink/xfrm.go rename to vendor/github.com/vishvananda/netlink/xfrm_linux.go index 02b41842e..dd38ed8e0 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_linux.go @@ -14,7 +14,7 @@ const ( XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP XFRM_PROTO_AH Proto = unix.IPPROTO_AH XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS - XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin + XFRM_PROTO_COMP Proto = unix.IPPROTO_COMP XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW ) diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go deleted file mode 100644 index b7532b092..000000000 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy.go +++ /dev/null @@ -1,97 +0,0 @@ -package netlink - -import ( - "fmt" - "net" -) - -// Dir is an enum representing an ipsec template direction. 
-type Dir uint8 - -const ( - XFRM_DIR_IN Dir = iota - XFRM_DIR_OUT - XFRM_DIR_FWD - XFRM_SOCKET_IN - XFRM_SOCKET_OUT - XFRM_SOCKET_FWD -) - -func (d Dir) String() string { - switch d { - case XFRM_DIR_IN: - return "dir in" - case XFRM_DIR_OUT: - return "dir out" - case XFRM_DIR_FWD: - return "dir fwd" - case XFRM_SOCKET_IN: - return "socket in" - case XFRM_SOCKET_OUT: - return "socket out" - case XFRM_SOCKET_FWD: - return "socket fwd" - } - return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN) -} - -// PolicyAction is an enum representing an ipsec policy action. -type PolicyAction uint8 - -const ( - XFRM_POLICY_ALLOW PolicyAction = 0 - XFRM_POLICY_BLOCK PolicyAction = 1 -) - -func (a PolicyAction) String() string { - switch a { - case XFRM_POLICY_ALLOW: - return "allow" - case XFRM_POLICY_BLOCK: - return "block" - default: - return fmt.Sprintf("action %d", a) - } -} - -// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec -// policy. These rules are matched with XfrmState to determine encryption -// and authentication algorithms. -type XfrmPolicyTmpl struct { - Dst net.IP - Src net.IP - Proto Proto - Mode Mode - Spi int - Reqid int - Optional int -} - -func (t XfrmPolicyTmpl) String() string { - return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}", - t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid) -} - -// XfrmPolicy represents an ipsec policy. It represents the overlay network -// and has a list of XfrmPolicyTmpls representing the base addresses of -// the policy. -type XfrmPolicy struct { - Dst *net.IPNet - Src *net.IPNet - Proto Proto - DstPort int - SrcPort int - Dir Dir - Priority int - Index int - Action PolicyAction - Ifindex int - Ifid int - Mark *XfrmMark - Tmpls []XfrmPolicyTmpl -} - -func (p XfrmPolicy) String() string { - return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}", - p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls) -} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index 358496804..d526739ce 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,10 +1,104 @@ package netlink import ( + "fmt" + "net" + "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) +// Dir is an enum representing an ipsec template direction. +type Dir uint8 + +const ( + XFRM_DIR_IN Dir = iota + XFRM_DIR_OUT + XFRM_DIR_FWD + XFRM_SOCKET_IN + XFRM_SOCKET_OUT + XFRM_SOCKET_FWD +) + +func (d Dir) String() string { + switch d { + case XFRM_DIR_IN: + return "dir in" + case XFRM_DIR_OUT: + return "dir out" + case XFRM_DIR_FWD: + return "dir fwd" + case XFRM_SOCKET_IN: + return "socket in" + case XFRM_SOCKET_OUT: + return "socket out" + case XFRM_SOCKET_FWD: + return "socket fwd" + } + return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN) +} + +// PolicyAction is an enum representing an ipsec policy action. +type PolicyAction uint8 + +const ( + XFRM_POLICY_ALLOW PolicyAction = 0 + XFRM_POLICY_BLOCK PolicyAction = 1 +) + +func (a PolicyAction) String() string { + switch a { + case XFRM_POLICY_ALLOW: + return "allow" + case XFRM_POLICY_BLOCK: + return "block" + default: + return fmt.Sprintf("action %d", a) + } +} + +// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec +// policy. 
These rules are matched with XfrmState to determine encryption +// and authentication algorithms. +type XfrmPolicyTmpl struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + Optional int +} + +func (t XfrmPolicyTmpl) String() string { + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}", + t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid) +} + +// XfrmPolicy represents an ipsec policy. It represents the overlay network +// and has a list of XfrmPolicyTmpls representing the base addresses of +// the policy. +type XfrmPolicy struct { + Dst *net.IPNet + Src *net.IPNet + Proto Proto + DstPort int + SrcPort int + Dir Dir + Priority int + Index int + Action PolicyAction + Ifindex int + Ifid int + Mark *XfrmMark + Tmpls []XfrmPolicyTmpl +} + +func (p XfrmPolicy) String() string { + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}", + p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls) +} + func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { sel.Family = uint16(nl.FAMILY_V4) if policy.Dst != nil { @@ -75,6 +169,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl]) userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst) userTmpl.Saddr.FromIP(tmpl.Src) + userTmpl.Family = uint16(nl.GetIPFamily(tmpl.Dst)) userTmpl.XfrmId.Proto = uint8(tmpl.Proto) userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi)) userTmpl.Mode = uint8(tmpl.Mode) @@ -223,8 +318,8 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { var policy XfrmPolicy - policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD) - policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS) + policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, uint16(family)) + policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, uint16(family)) policy.Proto = Proto(msg.Sel.Proto) policy.DstPort = int(nl.Swap16(msg.Sel.Dport)) policy.SrcPort = int(nl.Swap16(msg.Sel.Sport)) diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go deleted file mode 100644 index 19df82c76..000000000 --- a/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ /dev/null @@ -1,131 +0,0 @@ -package netlink - -import ( - "fmt" - "net" - "time" -) - -// XfrmStateAlgo represents the algorithm to use for the ipsec encryption. -type XfrmStateAlgo struct { - Name string - Key []byte - TruncateLen int // Auth only - ICVLen int // AEAD only -} - -func (a XfrmStateAlgo) String() string { - base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key) - if a.TruncateLen != 0 { - base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen) - } - if a.ICVLen != 0 { - base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen) - } - return fmt.Sprintf("%s}", base) -} - -// EncapType is an enum representing the optional packet encapsulation. -type EncapType uint8 - -const ( - XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1 - XFRM_ENCAP_ESPINUDP -) - -func (e EncapType) String() string { - switch e { - case XFRM_ENCAP_ESPINUDP_NONIKE: - return "espinudp-non-ike" - case XFRM_ENCAP_ESPINUDP: - return "espinudp" - } - return "unknown" -} - -// XfrmStateEncap represents the encapsulation to use for the ipsec encryption. 
-type XfrmStateEncap struct { - Type EncapType - SrcPort int - DstPort int - OriginalAddress net.IP -} - -func (e XfrmStateEncap) String() string { - return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}", - e.Type, e.SrcPort, e.DstPort, e.OriginalAddress) -} - -// XfrmStateLimits represents the configured limits for the state. -type XfrmStateLimits struct { - ByteSoft uint64 - ByteHard uint64 - PacketSoft uint64 - PacketHard uint64 - TimeSoft uint64 - TimeHard uint64 - TimeUseSoft uint64 - TimeUseHard uint64 -} - -// XfrmStateStats represents the current number of bytes/packets -// processed by this State, the State's installation and first use -// time and the replay window counters. -type XfrmStateStats struct { - ReplayWindow uint32 - Replay uint32 - Failed uint32 - Bytes uint64 - Packets uint64 - AddTime uint64 - UseTime uint64 -} - -// XfrmState represents the state of an ipsec policy. It optionally -// contains an XfrmStateAlgo for encryption and one for authentication. -type XfrmState struct { - Dst net.IP - Src net.IP - Proto Proto - Mode Mode - Spi int - Reqid int - ReplayWindow int - Limits XfrmStateLimits - Statistics XfrmStateStats - Mark *XfrmMark - OutputMark *XfrmMark - Ifid int - Auth *XfrmStateAlgo - Crypt *XfrmStateAlgo - Aead *XfrmStateAlgo - Encap *XfrmStateEncap - ESN bool -} - -func (sa XfrmState) String() string { - return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", - sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) -} -func (sa XfrmState) Print(stats bool) string { - if !stats { - return sa.String() - } - at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) - ut := "-" - if sa.Statistics.UseTime > 0 { - ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) - } - return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ - "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", - sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), - sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, - sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) -} - -func printLimit(lmt uint64) string { - if lmt == ^uint64(0) { - return "(INF)" - } - return fmt.Sprintf("%d", lmt) -} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 61a2d2dea..554f2498c 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -2,12 +2,154 @@ package netlink import ( "fmt" + "net" + "time" "unsafe" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) +// XfrmStateAlgo represents the algorithm to use for the ipsec encryption. 
+type XfrmStateAlgo struct { + Name string + Key []byte + TruncateLen int // Auth only + ICVLen int // AEAD only +} + +func (a XfrmStateAlgo) String() string { + base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key) + if a.TruncateLen != 0 { + base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen) + } + if a.ICVLen != 0 { + base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen) + } + return fmt.Sprintf("%s}", base) +} + +// EncapType is an enum representing the optional packet encapsulation. +type EncapType uint8 + +const ( + XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1 + XFRM_ENCAP_ESPINUDP +) + +func (e EncapType) String() string { + switch e { + case XFRM_ENCAP_ESPINUDP_NONIKE: + return "espinudp-non-ike" + case XFRM_ENCAP_ESPINUDP: + return "espinudp" + } + return "unknown" +} + +// XfrmStateEncap represents the encapsulation to use for the ipsec encryption. +type XfrmStateEncap struct { + Type EncapType + SrcPort int + DstPort int + OriginalAddress net.IP +} + +func (e XfrmStateEncap) String() string { + return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}", + e.Type, e.SrcPort, e.DstPort, e.OriginalAddress) +} + +// XfrmStateLimits represents the configured limits for the state. +type XfrmStateLimits struct { + ByteSoft uint64 + ByteHard uint64 + PacketSoft uint64 + PacketHard uint64 + TimeSoft uint64 + TimeHard uint64 + TimeUseSoft uint64 + TimeUseHard uint64 +} + +// XfrmStateStats represents the current number of bytes/packets +// processed by this State, the State's installation and first use +// time and the replay window counters. +type XfrmStateStats struct { + ReplayWindow uint32 + Replay uint32 + Failed uint32 + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + +// XfrmReplayState represents the sequence number states for +// "legacy" anti-replay mode. +type XfrmReplayState struct { + OSeq uint32 + Seq uint32 + BitMap uint32 +} + +func (r XfrmReplayState) String() string { + return fmt.Sprintf("{OSeq: 0x%x, Seq: 0x%x, BitMap: 0x%x}", + r.OSeq, r.Seq, r.BitMap) +} + +// XfrmState represents the state of an ipsec policy. It optionally +// contains an XfrmStateAlgo for encryption and one for authentication. 
+type XfrmState struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + ReplayWindow int + Limits XfrmStateLimits + Statistics XfrmStateStats + Mark *XfrmMark + OutputMark *XfrmMark + Ifid int + Auth *XfrmStateAlgo + Crypt *XfrmStateAlgo + Aead *XfrmStateAlgo + Encap *XfrmStateEncap + ESN bool + DontEncapDSCP bool + OSeqMayWrap bool + Replay *XfrmReplayState + Selector *XfrmPolicy +} + +func (sa XfrmState) String() string { + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay) +} +func (sa XfrmState) Print(stats bool) string { + if !stats { + return sa.String() + } + at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) + ut := "-" + if sa.Statistics.UseTime > 0 { + ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) + } + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ + "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", + sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, + sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) +} + +func printLimit(lmt uint64) string { + if lmt == ^uint64(0) { + return "(INF)" + } + return fmt.Sprintf("%d", lmt) +} func writeStateAlgo(a *XfrmStateAlgo) []byte { algo := nl.XfrmAlgo{ AlgKeyLen: uint32(len(a.Key) * 8), @@ -77,6 +219,14 @@ func writeReplayEsn(replayWindow int) []byte { return replayEsn.Serialize() } +func writeReplay(r *XfrmReplayState) []byte { + return (&nl.XfrmReplayState{ + OSeq: r.OSeq, + Seq: r.Seq, + BitMap: r.BitMap, + }).Serialize() +} + // XfrmStateAdd will add an xfrm state to the system. 
// Equivalent to: `ip xfrm state add $state` func XfrmStateAdd(state *XfrmState) error { @@ -166,6 +316,21 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { req.AddData(out) } } + if state.OSeqMayWrap || state.DontEncapDSCP { + var flags uint32 + if state.DontEncapDSCP { + flags |= nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP + } + if state.OSeqMayWrap { + flags |= nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP + } + out := nl.NewRtAttr(nl.XFRMA_SA_EXTRA_FLAGS, nl.Uint32Attr(flags)) + req.AddData(out) + } + if state.Replay != nil { + out := nl.NewRtAttr(nl.XFRMA_REPLAY_VAL, writeReplay(state.Replay)) + req.AddData(out) + } if state.Ifid != 0 { ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) @@ -186,7 +351,6 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { msg.Min = 0x100 msg.Max = 0xffffffff req.AddData(msg) - if state.Mark != nil { out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) req.AddData(out) @@ -314,7 +478,6 @@ var familyError = fmt.Errorf("family error") func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { var state XfrmState - state.Dst = msg.Id.Daddr.ToIP() state.Src = msg.Saddr.ToIP() state.Proto = Proto(msg.Id.Proto) @@ -324,20 +487,25 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { state.ReplayWindow = int(msg.ReplayWindow) lftToLimits(&msg.Lft, &state.Limits) curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) + state.Selector = &XfrmPolicy{ + Dst: msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, msg.Sel.Family), + Src: msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, msg.Sel.Family), + Proto: Proto(msg.Sel.Proto), + DstPort: int(nl.Swap16(msg.Sel.Dport)), + SrcPort: int(nl.Swap16(msg.Sel.Sport)), + Ifindex: int(msg.Sel.Ifindex), + } return &state } func parseXfrmState(m []byte, family int) (*XfrmState, error) { msg := nl.DeserializeXfrmUsersaInfo(m) - // This is mainly for the state dump if family != FAMILY_ALL && family != int(msg.Family) { return nil, familyError } - state := xfrmStateFromXfrmUsersaInfo(msg) - attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:]) if err != nil { return nil, err @@ -385,6 +553,14 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { state.Mark = new(XfrmMark) state.Mark.Value = mark.Value state.Mark.Mask = mark.Mask + case nl.XFRMA_SA_EXTRA_FLAGS: + flags := native.Uint32(attr.Value) + if (flags & nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP) != 0 { + state.DontEncapDSCP = true + } + if (flags & nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP) != 0 { + state.OSeqMayWrap = true + } case nl.XFRMA_SET_MARK: if state.OutputMark == nil { state.OutputMark = new(XfrmMark) @@ -400,6 +576,14 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { } case nl.XFRMA_IF_ID: state.Ifid = int(native.Uint32(attr.Value)) + case nl.XFRMA_REPLAY_VAL: + if state.Replay == nil { + state.Replay = new(XfrmReplayState) + } + replay := nl.DeserializeXfrmReplayState(attr.Value[:]) + state.Replay.OSeq = replay.OSeq + state.Replay.Seq = replay.Seq + state.Replay.BitMap = replay.BitMap } } @@ -476,6 +660,9 @@ func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { msg.Id.Spi = nl.Swap32(uint32(state.Spi)) msg.Reqid = uint32(state.Reqid) msg.ReplayWindow = uint8(state.ReplayWindow) - + msg.Sel = nl.XfrmSelector{} + if state.Selector != nil { + selFromPolicy(&msg.Sel, state.Selector) + } return msg } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go new file mode 100644 index 
000000000..12fdd26d7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go @@ -0,0 +1,7 @@ +//go:build !linux +// +build !linux + +package netlink + +type XfrmPolicy struct{} +type XfrmState struct{} diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index f124bfa15..013173af5 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.21.9 +1.22.6 diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 18154c638..214077974 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -41,6 +41,15 @@ coverage: TEST_FREELIST_TYPE=array go test -v -timeout 30m \ -coverprofile cover-freelist-array.out -covermode atomic +BOLT_CMD=bbolt + +build: + go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD} + +.PHONY: clean +clean: # Clean binaries + rm -f ./bin/${BOLT_CMD} + .PHONY: gofail-enable gofail-enable: install-gofail gofail enable . @@ -61,3 +70,7 @@ test-failpoint: @echo "[failpoint] array freelist test" TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint +.PHONY: test-robustness # Running robustness tests requires root permission +test-robustness: + go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + go test -v ${TESTFLAGS} ./tests/robustness -test.root diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 4175bdf3d..822798e41 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -524,7 +524,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) + return fmt.Errorf("unmap error: %v", err.Error()) } return nil @@ -571,7 +571,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { - return fmt.Errorf("munlock error: " + err.Error()) + return fmt.Errorf("munlock error: %v", err.Error()) } return nil } @@ -580,7 +580,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { - return fmt.Errorf("mlock error: " + err.Error()) + return fmt.Errorf("mlock error: %v", err.Error()) } return nil } @@ -1159,6 +1159,8 @@ func (db *DB) grow(sz int) error { // https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { if runtime.GOOS != "windows" { + // gofail: var resizeFileError string + // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("file resize error: %s", err) } diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go index 61d43f81b..dffc7bc74 100644 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -252,6 +252,14 @@ func (f *freelist) rollback(txid txid) { } // Remove pages from pending list and mark as free if allocated by txid. 
delete(f.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range f.allocs { + if tid == txid { + delete(f.allocs, pgid) + } + } + f.mergeSpans(m) } diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 2fac8c0a7..766395de3 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -1,6 +1,7 @@ package bbolt import ( + "errors" "fmt" "io" "os" @@ -185,6 +186,10 @@ func (tx *Tx) Commit() error { // If the high water mark has moved up then attempt to grow the database. if tx.meta.pgid > opgid { + _ = errors.New("") + // gofail: var lackOfDiskSpace string + // tx.rollback() + // return errors.New(lackOfDiskSpace) if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { tx.rollback() return err @@ -470,6 +475,7 @@ func (tx *Tx) write() error { // Ignore file sync if flag is set on DB. if !tx.db.NoSync || IgnoreNoSync { + // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { return err } @@ -507,6 +513,7 @@ func (tx *Tx) writeMeta() error { return err } if !tx.db.NoSync || IgnoreNoSync { + // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { return err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 92b8cf73c..6aae83bfd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -23,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index cabf645a5..214acaf58 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a1b5b5e5a..f0a9bb9ef 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -111,7 +100,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 38c7f01c7..56b24b982 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended // to be used to add tracing by wrapping existing handlers (with Handler) and diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 1fc15019e..d01bdccf4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -1,32 +1,20 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( - "io" "net/http" "time" "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -46,6 +34,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool + traceSemconv semconv.HTTPServer requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram @@ -67,6 +56,8 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, + + traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -143,12 +134,9 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), - } - if h.server != "" { - hostAttr := semconv.NetHostName(h.server) - opts = append(opts, trace.WithAttributes(hostAttr)) + trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), } + opts = append(opts, h.spanStartOptions...) 
if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { opts = append(opts, trace.WithNewRoot()) @@ -217,23 +205,36 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - setAfterServeAttributes(span, bw.read.Load(), rww.written, rww.statusCode, bw.err, rww.err) + span.SetStatus(semconv.ServerStatus(rww.statusCode)) + span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: rww.statusCode, + ReadBytes: bw.read.Load(), + ReadError: bw.err, + WriteBytes: rww.written, + WriteError: rww.err, + })...) // Add metrics attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) if rww.statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + h.responseBytesCounter.Add(ctx, rww.written, addOpts...) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) @@ -241,37 +242,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http h.serverLatencyMeasure.Record(ctx, elapsedTime, o) } -func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { - attributes := []attribute.KeyValue{} - - // TODO: Consider adding an event after each read and write, possibly as an - // option (defaulting to off), so as to not create needlessly verbose spans. - if read > 0 { - attributes = append(attributes, ReadBytesKey.Int64(read)) - } - if rerr != nil && rerr != io.EOF { - attributes = append(attributes, ReadErrorKey.String(rerr.Error())) - } - if wrote > 0 { - attributes = append(attributes, WroteBytesKey.Int64(wrote)) - } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) - - if werr != nil && werr != io.EOF { - attributes = append(attributes, WriteErrorKey.String(werr.Error())) - } - span.SetAttributes(attributes...) -} - // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer().Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - attr := semconv.HTTPRouteKey.String(route) - span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 000000000..3ec0ad00c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "os" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} + +func NewHTTPServer() HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) + return HTTPServer{duplicate: env == "http/dup"} +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. 
+func ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go new file mode 100644 index 000000000..e7f293761 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 000000000..c3e838aaa --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import 
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type oldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o oldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. 
+func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go new file mode 100644 index 000000000..0c5d4c460 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +type newHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
+ attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go index edf4ce3d3..7aa5f99e8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index 0efd5261f..a73bb06e9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -2,18 +2,7 @@ // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d3a06e0ca..a9a9226b3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -2,17 +2,7 @@ // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" @@ -102,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -148,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 26a51a180..ea504e396 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -48,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 43e937a67..0d3cb2e4a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -22,15 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -148,8 +136,10 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. @@ -181,11 +171,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.responseBytesCounter.Add(ctx, n, addOpts...) } // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 35254e888..b0957f28c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.49.0" + return "0.53.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go index 2852ec971..948f8406c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -98,3 +87,13 @@ func (w *respWriterWrapper) WriteHeader(statusCode int) { } w.ResponseWriter.WriteHeader(statusCode) } + +func (w *respWriterWrapper) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9c..6bf3abc41 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3..e2cb3ea94 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f5698..000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f38..6d9c8b649 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -11,6 +11,7 @@ linters: enable: - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +22,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -124,6 +128,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. 
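The otelhttp hunks above replace the internal `injectLabeler` with the exported `ContextWithLabeler`, and `Transport.RoundTrip` now reuses a `Labeler` already present on the request context instead of always installing its own. A minimal sketch of how a caller can supply per-request metric attributes through the new API (the attribute key and target URL are illustrative, not taken from the diff):

```go
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Wrap the default transport with the instrumented one updated above.
	client := http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	// Attach a caller-owned Labeler. RoundTrip now finds it via
	// LabelerFromContext, so these attributes end up on the metrics the
	// transport emits for this request.
	labeler := &otelhttp.Labeler{}
	labeler.Add(attribute.String("tenant", "demo")) // illustrative attribute

	ctx := otelhttp.ContextWithLabeler(context.Background(), labeler)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
```

Previously the labeler was created inside `RoundTrip` and was unreachable from calling code; the exported constructor is what makes the pattern above possible.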
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 98f2d2043..c01e6998e 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,159 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. 
(#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. + - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`.
(#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + +## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 + +### Added + +- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134) +- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194) +- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`. + This new module contains the Go implementation of the OpenTelemetry Logs SDK. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. + This new module contains an OTLP exporter that transmits log telemetry using HTTP. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. + This new module contains an exporter that prints log records to STDOUT. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- The `go.opentelemetry.io/otel/semconv/v1.25.0` package. + The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254) + +### Changed + +- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) +- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) + +## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 + +### Added + +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906) +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906) +- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032) +- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`. + This method is used to notify users if a log record will be emitted or not. (#5071) +- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`. + This value represents an unset severity level. (#5072) +- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076) +- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+ This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes. + At which point, users will be required to migrate their code, and this package will be deprecated then removed. (#5085) +- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) +- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) +- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). + Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated. + +### Fixed + +- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) + +### Removed + +- Drop support for [Go 1.20]. (#4967) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) + ## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 This release is the last to support [Go 1.20]. @@ -22,6 +175,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) +- ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -138,7 +292,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be.
See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added @@ -170,15 +324,15 @@ This release brings a breaking change for custom trace API implementations. Some - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. This extends the `Span` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) @@ -814,7 +968,7 @@ The next release will require at least [Go 1.19]. - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) - Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. 
(#3408) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) - Prevent duplicate Prometheus description, unit, and type. (#3469) @@ -1859,7 +2013,7 @@ with major version 0. - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) - Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) @@ -2849,7 +3003,11 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 [1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 [1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 @@ -2937,3 +3095,5 @@ It contains api and sdk for trace and meter. 
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d92..202554933 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index c9f2bac55..b86572f58 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way: [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + ## Style Guide One of the primary goals of this project is that it is actually used by @@ -560,6 +570,9 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. @@ -615,17 +628,15 @@ should be canceled. ### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -633,6 +644,8 @@ should be canceled. 
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 6de95219b..f33619f76 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -1,16 +1,5 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 TOOLS_MOD_DIR := ./internal/tools @@ -25,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) @@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -81,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -110,7 +96,7 @@ $(PYTOOLS): @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" # Install python packages into the virtual environment. -$(PYTOOLS)/%: | $(PYTOOLS) +$(PYTOOLS)/%: $(PYTOOLS) @$(DOCKERPY) $(PIP) install -r requirements.txt CODESPELL = $(PYTOOLS)/codespell @@ -124,18 +110,18 @@ generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* -go-generate/%: | $(STRINGER) $(GOTMPL) +go-generate/%: $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." 
\ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) +vanity-import-fix: $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. .PHONY: go-work -go-work: | $(CROSSLINK) +go-work: $(CROSSLINK) $(CROSSLINK) work --root=$(shell pwd) # Build @@ -178,7 +164,7 @@ test/%: COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) +test-coverage: $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ @@ -209,23 +195,23 @@ golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) +golangci-lint/%: $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink -crosslink: | $(CROSSLINK) +crosslink: $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink +go-mod-tidy/%: crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.20 + && $(GO) mod tidy -compat=1.21 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -234,23 +220,23 @@ lint-modules: go-mod-tidy lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check -vanity-import-check: | $(PORTO) +vanity-import-check: $(PORTO) @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell -misspell: | $(MISSPELL) +misspell: $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* -govulncheck/%: | $(GOVULNCHECK) +govulncheck/%: $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... .PHONY: codespell -codespell: | $(CODESPELL) +codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) .PHONY: license-check @@ -263,15 +249,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -284,13 +261,11 @@ check-clean-work-tree: SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -302,17 +277,25 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: | $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: | $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: +lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 7766259a5..5a8909317 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -51,19 +51,16 @@ Currently, this project supports the following environments. |---------|------------|--------------| | Ubuntu | 1.22 | amd64 | | Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.20 | amd64 | | Ubuntu | 1.22 | 386 | | Ubuntu | 1.21 | 386 | -| Ubuntu | 1.20 | 386 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | | MacOS | 1.22 | amd64 | | MacOS | 1.21 | amd64 | -| MacOS | 1.20 | amd64 | | Windows | 1.22 | amd64 | | Windows | 1.21 | amd64 | -| Windows | 1.20 | amd64 | | Windows | 1.22 | 386 | | Windows | 1.21 | 386 | -| Windows | 1.20 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -100,12 +97,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
-| Exporter | Metrics | Traces | -|---------------------------------------|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | ## Contributing diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index d2691d0bd..940f57f3d 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). +## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + ## Pre-Release First, decide which module sets will be released and update their versions diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 000000000..5b3da8f14 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go index dafe7424d..eef51ebc2 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc5766..318e42fca 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 638c213d5..be9cd922d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271fb..f2ba89ce4 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04e4..d9a22c650 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce05..3028f9a40 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index fb6da5145..bff9c7fdb 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -1,24 +1,14 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" import ( + "cmp" "encoding/json" "reflect" + "slices" "sort" - "sync" ) type ( // Set is an // immutable set of attributes, with an internal cache for storing // attribute encodings. // - // This type supports the Equivalent method of comparison using values of - // type Distinct. + // This type will remain comparable for backwards compatibility. The + // equivalence of Sets across versions is not guaranteed to be stable. + // Prior versions may find two Sets to be equal or not when compared + // directly (i.e. ==), but subsequent versions may not. Users should use + // the Equals method to ensure stable equivalence checking. + // + // Users should also use the Distinct returned from Equivalent as a map key + // instead of a Set directly. In addition to that type providing guarantees + // on stable equivalence, it may also provide performance improvements. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. This can be used as a map key or for equality checking - // between Sets. + // Distinct is a unique identifier of a Set. + // + // Distinct is designed to ensure equivalence stability: comparisons + // will return the same value across versions. For this reason, Distinct + // should always be used as a map key instead of a Set. Distinct struct { iface interface{} } - // Sortable implements sort.Interface, used for sorting KeyValue.
This is - // an exported type to support a memory optimization. A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). + // Sortable implements sort.Interface, used for sorting KeyValue. + // + // Deprecated: This type is no longer used. It was added as a performance + // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no + // longer supported by the module). Sortable []KeyValue ) @@ -56,12 +56,6 @@ var ( iface: [0]KeyValue{}, }, } - - // sortables is a pool of Sortables used to create Sets with a user does - // not provide one. - sortables = sync.Pool{ - New: func() interface{} { return new(Sortable) }, - } ) // EmptySet returns a reference to a Set with no elements. @@ -187,13 +181,7 @@ func empty() Set { // Except for empty sets, this method adds an additional allocation compared // with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - srt := sortables.Get().(*Sortable) - s, _ := NewSetWithSortableFiltered(kvs, srt, nil) - sortables.Put(srt) + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set { // NewSetWithSortableFiltered for more details. // // This call includes a Sortable option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - srt := sortables.Get().(*Sortable) - s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) - sortables.Put(srt) - return s, filtered -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs // Stable sort so the following de-duplication can implement // last-value-wins semantics. 
- sort.Stable(tmp) - - *tmp = nil + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) position := len(kvs) - 1 offset := position - 1 @@ -289,6 +239,35 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S return Set{equivalent: computeDistinct(kvs)}, nil } +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated calls preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + // filteredToFront filters slice in-place using keep function. All KeyValues that need to // be removed are moved to the front. All KeyValues that need to be kept are // moved (in-order) to the back. The index for the first KeyValue to be kept is diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index cb21dd5c0..9ea0ecbbd 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" @@ -242,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 000000000..7d798435e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 7d27cf77d..c40c896cc 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" @@ -19,6 +8,7 @@ import ( "fmt" "net/url" "strings" + "unicode/utf8" "go.opentelemetry.io/otel/internal/baggage" ) @@ -67,10 +57,10 @@ func NewKeyProperty(key string) (Property, error) { // NewKeyValueProperty returns a new Property for key with value. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewKeyValuePropertyRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) @@ -232,13 +222,13 @@ type Member struct { hasData bool } -// NewMemberRaw returns a new Member from the passed arguments. +// NewMember returns a new Member from the passed arguments. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. 
+// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewMemberRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) @@ -309,10 +299,10 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } - // Decode a precent-encoded value. + // Decode a percent-encoded value. value, err := url.PathUnescape(val) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } return Member{key: key, value: value, properties: props, hasData: true}, nil } @@ -345,9 +335,9 @@ func (m Member) String() string { // A key is just an ASCII string. A value is restricted to be // US-ASCII characters excluding CTLs, whitespace, // DQUOTE, comma, semicolon, and backslash. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + s += propertyDelimiter + m.properties.String() } return s } @@ -616,7 +606,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { return } - // Decode a precent-encoded value. + // Decode a percent-encoded value. value, err := url.PathUnescape(s[valueStart:valueEnd]) if err != nil { return @@ -641,6 +631,95 @@ func skipSpace(s string, offset int) int { return i } +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + func validateKey(s string) bool { if len(s) == 0 { return false @@ -656,17 +735,7 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return (c >= 0x23 && c <= 0x27) || - (c >= 0x30 && c <= 0x39) || - (c >= 0x41 && c <= 0x5a) || - (c >= 0x5e && c <= 0x7a) || - c == 0x21 || - c == 0x2a || - c == 0x2b || - c == 0x2d || - c == 0x2e || - c == 0x7c || - c == 0x7e + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } func validateValue(s string) bool { @@ -679,12 +748,109 @@ func validateValue(s string) bool { return true } +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': 
true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + func validateValueChar(c int32) bool { - return c == 0x21 || - (c >= 0x23 && c <= 0x2b) || - (c >= 0x2d && c <= 0x3a) || - (c >= 0x3c && c <= 0x5b) || - (c >= 0x5d && c <= 0x7e) + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go index 24b34b756..a572461a0 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go index 4545100df..b51d87cab 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
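// The validateKeyChar/validateValueChar rewrite above trades chained range
// comparisons for a single table lookup per rune. A minimal self-contained
// sketch of the same technique; "allowed" and "valid" are illustrative
// names, not part of the vendored package:
package main

import (
	"fmt"
	"unicode/utf8"
)

// allowed marks the ASCII bytes accepted; sizing the array to utf8.RuneSelf
// (0x80) keeps it ASCII-only, so one bounds test guards the index.
var allowed = [utf8.RuneSelf]bool{'a': true, 'b': true, '-': true}

func valid(s string) bool {
	for _, r := range s {
		if r >= utf8.RuneSelf || !allowed[r] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(valid("ab-a"), valid("ab?")) // true false
}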
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides functionality for storing and retrieving diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 000000000..24c52b387 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 587ebae4e..df29d96a6 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index 4e328fbb4..ee8db448b 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 36d7c24e8..441c59501 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
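// The attribute.Value.Emit hunk earlier in this diff switches slice
// formatting from fmt.Sprint to json.Marshal. A small sketch of the visible
// effect, assuming the updated vendored package:
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.StringSlice("letters", []string{"a", "b"}).Value
	// With the change above this prints ["a","b"]; previously it was [a b].
	fmt.Println(v.Emit())
}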
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otel provides global access to the OpenTelemetry API. The subpackages of diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go index 72fad8541..67414c71e 100644 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index 9a58fb1d3..93e80ea30 100644 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -1,18 +1,7 @@ #!/usr/bin/env bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 4115fe3bb..07623b679 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" @@ -18,12 +7,8 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. 
- _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) // GetErrorHandler returns the global ErrorHandler instance. // @@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 622c3ee3f..822d84794 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package attribute provide several helper functions for some commonly used @@ -25,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with same elements as slice. func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. 
func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go index b96e5408e..b4f85f44a 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides base types and functionality to store and retrieve diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 4469700d9..3aea9c491 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/internal/baggage" diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go index f532f07e9..4259f0320 100644 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
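// The internal/attribute hunks above replace copying through a typed slice
// view with reflect.Copy straight into the runtime-built array. The same
// pattern in isolation; "toArray" is an illustrative name:
package main

import (
	"fmt"
	"reflect"
)

// toArray returns v copied into a [len(v)]int64 array value, which, unlike a
// slice, is comparable and can therefore serve as a map key.
func toArray(v []int64) interface{} {
	var zero int64
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v)) // element-wise copy, like built-in copy
	return cp.Interface()
}

func main() {
	a := toArray([]int64{1, 2, 3})
	fmt.Printf("%T %v\n", a, a) // [3]int64 [1 2 3]
}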
+// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 5e9b83047..c657ff8e7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,38 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -44,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. 
-// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index ebb13c206..3a0cc42f6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index c6f305a2b..adbca7d34 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
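// sfGauge and siGauge above reuse the global package's delegation pattern: a
// placeholder instrument drops measurements until an SDK meter is installed,
// then atomically swaps in the real one. A reduced sketch with hypothetical
// types (recorder, lazyGauge), not the vendored implementation:
package main

import (
	"fmt"
	"sync/atomic"
)

type recorder interface{ Record(v float64) }

type printRecorder struct{}

func (printRecorder) Record(v float64) { fmt.Println("recorded", v) }

// lazyGauge silently drops records until setDelegate is called.
type lazyGauge struct{ delegate atomic.Value }

func (g *lazyGauge) setDelegate(r recorder) { g.delegate.Store(r) }

func (g *lazyGauge) Record(v float64) {
	if d := g.delegate.Load(); d != nil {
		d.(recorder).Record(v)
	}
}

func main() {
	g := &lazyGauge{}
	g.Record(1) // dropped: no delegate yet
	g.setDelegate(printRecorder{})
	g.Record(2) // recorded 2
}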
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -23,17 +12,20 @@ import ( "github.com/go-logr/stdr" ) -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger atomic.Pointer[logr.Logger] +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() -// SetLogger overrides the globalLogger with l. +// SetLogger sets the global Logger to l. // // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` @@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) { globalLogger.Store(&l) } -func getLogger() logr.Logger { +// GetLogger returns the global logger. +func GetLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) + GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) + GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) + GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) + GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 7ed61c0e2..cfd1df9bf 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
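// internal_logging.go above swaps the init-time default logger for a lazily
// constructed atomic pointer and exports GetLogger. Configuring the global
// logger from application code is unchanged; a hedged usage sketch:
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// Per the verbosity comments in the diff: V(1)=Warn, V(4)=Info,
	// V(8)=Debug. Raise stdr verbosity so Info-level messages are emitted.
	stdr.SetVerbosity(4)
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
}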
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -76,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.meters == nil { @@ -175,6 +165,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, nil } +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) @@ -241,6 +242,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, nil } +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go index 06bac35c2..38560ff99 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 386c8bfdc..204ea142a 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -25,6 +14,10 @@ import ( ) type ( + errorHandlerHolder struct { + eh ErrorHandler + } + tracerProviderHolder struct { tp trace.TracerProvider } @@ -39,15 +32,59 @@ type ( ) var ( + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 3f61ec12a..e31f442b4 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
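// state.go above moves error-handler storage into the same holder pattern as
// the tracer and meter providers; the public API is unchanged. A typical
// usage sketch:
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Route otel-internal errors to the application logger instead of the
	// default stderr logging. ErrorHandlerFunc adapts a plain function.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))

	// Errors reported via otel.Handle now reach the handler above.
	otel.Handle(errors.New("example error"))
}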
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -97,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.tracers == nil { @@ -112,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct { - name string - version string -} +type il struct{ name, version, schema string } // tracer is a placeholder for a trace.Tracer. // @@ -193,6 +180,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e79400..3e7bb3b35 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go index c4f8acd5d..6de7f2e4d 100644 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
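// trace.go above adds the schema URL to the tracer cache key (il), so two
// tracers differing only in schema URL no longer collapse into one cache
// entry. An illustrative sketch; the schema URLs are examples:
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Same name and version, different schema URLs: with the change above the
	// global provider caches and returns two distinct tracers.
	t1 := otel.Tracer("example", trace.WithSchemaURL("https://opentelemetry.io/schemas/1.24.0"))
	t2 := otel.Tracer("example", trace.WithSchemaURL("https://opentelemetry.io/schemas/1.25.0"))
	_, _ = t1, t2
}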
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index f95517195..1e6473b32 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 000000000..0cf902e01 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index 072baa8e8..cf23db778 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -50,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -165,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. 
type Float64ObservableGaugeConfig struct { description string unit string diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 9bd6ebf02..c82ba5324 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 778ad2d74..d9e3b13e4 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 54716e13b..f153745b0 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package metric provides the OpenTelemetry API used to measure metrics about @@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. 
+ - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names, it is the callers responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -164,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. +[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 000000000..1f6e0efa7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index ae0bdbd2e..1a9dc6809 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. @@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. 
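// The doc.go section above spells out the instrument name syntax, and this
// diff adds the synchronous Int64Gauge to the Meter interface. A hedged
// sketch combining the two; "queue.depth" is an example name:
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/meter")

	// "queue.depth" conforms to the syntax: alphabetic first letter, then
	// alphanumerics, '_', '.', '-', or '/'.
	gauge, err := meter.Int64Gauge("queue.depth",
		metric.WithDescription("current queue depth"),
		metric.WithUnit("{item}"),
	)
	if err != nil {
		otel.Handle(err)
		return
	}
	gauge.Record(context.Background(), 42)
}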
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index be89cd533..ea52e4023 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -27,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -34,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c diff --git 
a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 2520bc74a..6a7991e01 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -58,17 +47,37 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -78,6 +87,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. 
+ // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The @@ -88,6 +101,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -98,23 +115,47 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
+ Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -124,6 +165,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and @@ -134,6 +179,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -144,6 +193,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 0a4825ae6..8403a4bad 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -39,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. 
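The substantive API change in the `meter.go` hunk above is the addition of synchronous gauge instruments (`Int64Gauge`, `Float64Gauge`) to the `Meter` interface; the `syncfloat64.go` and `syncint64.go` hunks below supply the matching interfaces, configs, and options. A minimal sketch of how downstream code might use the new instrument, assuming a meter provider is configured elsewhere (the scope name, instrument name, and attribute are illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func recordFanSpeed(ctx context.Context) error {
	// otel.Meter resolves against the globally registered MeterProvider
	// (a no-op implementation unless an SDK has been installed).
	meter := otel.Meter("example/gauges") // illustrative scope name

	// Float64Gauge is one of the two synchronous gauges added in this diff.
	speed, err := meter.Float64Gauge(
		"hw.fan.speed", // hypothetical instrument name
		metric.WithDescription("current fan speed"),
		metric.WithUnit("rpm"),
	)
	if err != nil {
		return err
	}

	// Record captures the instantaneous value; WithAttributes is optional.
	speed.Record(ctx, 1500, metric.WithAttributes(attribute.Int("fan.id", 0)))
	return nil
}
```

Unlike the existing observable gauges, no callback registration is involved: per the new doc comments, the value is recorded synchronously at call time.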
type Float64CounterConfig struct { description string unit string @@ -92,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -144,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 56667d32f..783fdfba7 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -144,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go index d29aaa32c..2fd949733 100644 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 000000000..e2959ac74 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 303cdf1cb..552263ba7 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go index c119eb285..33a3baf15 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package propagation contains OpenTelemetry context propagators. diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c94438f73..8c8286aab 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 63e5d6222..6870e316d 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" @@ -46,7 +35,7 @@ var ( versionPart = fmt.Sprintf("%.2X", supportedVersion) ) -// Inject set tracecontext from the Context into the carrier. +// Inject injects the trace context from ctx into carrier. func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 000000000..8c5ac55ca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e138..ab09daf9d 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md new file mode 100644 index 000000000..82e1f46b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.20.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go index 67d1d4c44..6685c392b 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
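Aside from the license-header rewrite, the only wording change in `trace_context.go` above is the corrected `Inject` doc comment. For orientation, a minimal sketch of the propagator in use with an HTTP header carrier (function names are illustrative):

```go
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel/propagation"
)

// injectExample writes the span context from ctx into the outgoing
// request's traceparent/tracestate headers.
func injectExample(ctx context.Context, req *http.Request) {
	tc := propagation.TraceContext{}
	tc.Inject(ctx, propagation.HeaderCarrier(req.Header))
}

// extractExample reads traceparent/tracestate back into a new Context
// on the receiving side.
func extractExample(req *http.Request) context.Context {
	tc := propagation.TraceContext{}
	return tc.Extract(req.Context(), propagation.HeaderCarrier(req.Header))
}
```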
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go index 359c5a696..0d1f55a8f 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go index 8ac9350d2..637763932 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go index 09ff4dfdb..f40c97825 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go index 342aede95..9c1840631 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go index a2b906742..3d44dae27 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go index e449e5c3b..95d0210e3 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go index 851774148..90b1b0452 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md new file mode 100644 index 000000000..0b6cbe960 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.24.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go new file mode 100644 index 000000000..6e688345c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go @@ -0,0 +1,4387 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes FaaS attributes. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. 
Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. + EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the identifies the class / type of +// event. 
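Each attribute in the newly vendored `semconv` v1.24.0 file follows the same generated pattern: an `attribute.Key` constant, predeclared `KeyValue`s for Enum-typed keys, and a typed constructor for free-form values. A sketch of attaching the FaaS attributes above to a span, assuming a tracer provider is configured elsewhere (scope and span names are illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func invokeDownstream(ctx context.Context) {
	tracer := otel.Tracer("example/faas") // illustrative scope name

	_, span := tracer.Start(ctx, "invoke my-function") // illustrative span name
	defer span.End()

	// Constructors return attribute.KeyValue; Enum members such as
	// FaaSInvokedProviderAWS are predeclared KeyValues.
	span.SetAttributes(
		semconv.FaaSInvokedName("my-function"),
		semconv.FaaSInvokedProviderAWS,
		semconv.FaaSInvokedRegion("eu-central-1"),
	)
}
```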
+func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Describes Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// A file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes Database attributes +const ( + // PoolNameKey is the attribute Key conforming to the "pool.name" semantic + // conventions. It represents the name of the connection pool; unique + // within the instrumented application. In case the connection pool + // implementation doesn't provide a name, then the + // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) + // should be used + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myDataSource' + PoolNameKey = attribute.Key("pool.name") + + // StateKey is the attribute Key conforming to the "state" semantic + // conventions. It represents the state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'idle' + StateKey = attribute.Key("state") +) + +var ( + // idle + StateIdle = StateKey.String("idle") + // used + StateUsed = StateKey.String("used") +) + +// PoolName returns an attribute KeyValue conforming to the "pool.name" +// semantic conventions. It represents the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, then the +// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) +// should be used +func PoolName(val string) attribute.KeyValue { + return PoolNameKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: experimental + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if the matched endpoint for the + // request had a rate-limiting policy.) 
+ // Stability: experimental + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (if and only if the request was + // not handled.) + // Stability: experimental + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If and only if a route was + // successfully matched.) + // Stability: experimental + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. 
It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// Describes JVM buffer metric attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// Describes System metric attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU metric attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory metric attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. 
It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging metric attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem metric attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. 
It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network metric attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process metric attributes +const ( + // SystemProcessesStatusKey is the attribute Key conforming to the + // "system.processes.status" semantic conventions. 
It represents the + // process state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessesStatusKey = attribute.Key("system.processes.status") +) + +var ( + // running + SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") + // sleeping + SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") + // stopped + SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") + // defunct + SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// The attributes used to describe telemetry in the context of databases. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary Cassandra table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials.
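A hedged sketch of how these Cassandra attribute keys might be attached to a client span: the enum values and helper constructors used here are defined further down in this file, the semconv import path is the assumed upstream one, and the attribute values are taken from the Examples in the doc comments above.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed import path
	"go.opentelemetry.io/otel/trace"
)

// annotateCassandraSpan decorates a DB client span with the Cassandra
// attributes defined above; the caller supplies any trace.Span.
func annotateCassandraSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemCassandra, // enum value defined further down in this file
		semconv.DBCassandraConsistencyLevelLocalQuorum,
		semconv.DBCassandraCoordinatorDC("us-west-2"),
		semconv.DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
		semconv.DBCassandraPageSize(5000),
	)
}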
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // Cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the Cosmos + // DB container name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // CosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the RU + // consumed for that operation. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // Cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. It represents + // the identifier of an Elasticsearch cluster.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") + + // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" + // semantic conventions. It represents an identifier (address, unique name, + // or any other identifier) of the database instance that is executing + // queries or mutations on the current connection. This is useful in cases + // where the database is running in a clustered environment and the + // instrumentation is able to record the node executing the query. The + // client may obtain this value in databases like MySQL using queries like + // `select @@hostname`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + DBInstanceIDKey = attribute.Key("db.instance.id") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the MongoDB + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") + + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails).
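Another sketch under the same assumptions: combining the generic database identification attributes for a SQL client span. The constructor helpers (DBName, DBOperation, DBStatement, DBUser) and the DBSystem enum value are all defined further down in this file, and the values are the Examples from the doc comments.

package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed import path
)

// sqlSpanAttrs assembles identification attributes for a query against a
// named MySQL database.
func sqlSpanAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.DBSystemMySQL, // enum value defined further down in this file
		semconv.DBName("customers"),
		semconv.DBOperation("SELECT"),
		semconv.DBStatement("SELECT * FROM wuser_table"),
		semconv.DBUser("readonly_user"),
	}
}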
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") + + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBSystemKey = attribute.Key("db.system") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBCassandraCoordinatorDC returns an 
attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary Cassandra table that the operation is acting upon, including the +// keyspace name (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the Cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the RU +// consumed for that operation. +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions.
It +// represents the request payload size in bytes. +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents +// the identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// DBInstanceID returns an attribute KeyValue conforming to the +// "db.instance.id" semantic conventions. It represents an identifier (address, +// unique name, or any other identifier) of the database instance that is +// executing queries or mutations on the current connection. This is useful in +// cases where the database is running in a clustered environment and the +// instrumentation is able to record the node executing the query. The client +// may obtain this value in databases like MySQL using queries like `select +// @@hostname`. +func DBInstanceID(val string) attribute.KeyValue { + return DBInstanceIDKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the MongoDB +// collection being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the name of +// the database being accessed.
For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// Describes deprecated HTTP attributes. +const ( + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'GET', 'POST', 'HEAD' + // Deprecated: use `http.request.method` instead. + HTTPMethodKey = attribute.Key("http.method") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.request.header.content-length` instead. + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.response.header.content-length` instead. + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + // Deprecated: use `url.scheme` instead. 
+ HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + // Deprecated: use `http.response.status_code` instead. + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.path` and `url.query` instead. + HTTPTargetKey = attribute.Key("http.target") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.full` instead. + HTTPURLKey = attribute.Key("http.url") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + // Deprecated: use `user_agent.original` instead. + HTTPUserAgentKey = attribute.Key("http.user_agent") +) + +var ( + // HTTP/1.0 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. +// +// Deprecated: use `http.request.method` instead. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. +// +// Deprecated: use `http.request.header.content-length` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. +// +// Deprecated: use `http.response.header.content-length` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. +// +// Deprecated: use `url.scheme` instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. 
+// +// Deprecated: use `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. +// +// Deprecated: use `url.path` and `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. +// +// Deprecated: use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. +// +// Deprecated: use `user_agent.original` instead. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address`. + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port`. + NetHostPortKey = attribute.Key("net.host.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address` on client spans and `client.address` on + // server spans. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port` on client spans and `client.port` on + // server spans. + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + // Deprecated: use `network.protocol.name`. + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + // Deprecated: use `network.protocol.version`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: use `network.local.address`. 
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `network.local.port`. + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + // Deprecated: use `network.peer.address`. + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: no replacement at this time. + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + // Deprecated: use `network.peer.port`. + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport`. + NetTransportKey = attribute.Key("net.transport") +) + +var ( + // IPv4 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // ip_tcp + // + // Deprecated: use `network.transport`. + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + // + // Deprecated: use `network.transport`. + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + // + // Deprecated: use `network.transport`. + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + // + // Deprecated: use `network.transport`. + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + // + // Deprecated: use `network.transport`. + NetTransportOther = NetTransportKey.String("other") +) + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. +// +// Deprecated: use `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. +// +// Deprecated: use `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. +// +// Deprecated: use `server.address` on client spans and `client.address` on +// server spans. 
+func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. +// +// Deprecated: use `server.port` on client spans and `client.port` on server +// spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. +// +// Deprecated: use `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. +// +// Deprecated: use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. +// +// Deprecated: use `network.local.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. +// +// Deprecated: use `network.local.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. +// +// Deprecated: use `network.peer.address`. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. +// +// Deprecated: no replacement at this time. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. +// +// Deprecated: use `network.peer.port`. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. 
+ DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// These attributes may be used for any disk related operation. +const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It represents a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable and SHOULD have low + // cardinality. + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span.
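To make the cardinality guidance for error.type concrete, here is a sketch (same assumed semconv import path) of mapping Go errors onto a predictable, low-cardinality value with the _OTHER fallback; the specific class names chosen are illustrative assumptions.

package main

import (
	"context"
	"errors"
	"net"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed import path
)

// errorTypeAttr classifies a failed operation's error into a documented,
// low-cardinality error.type value; call it only when the operation actually
// failed, since successful operations SHOULD NOT set error.type.
func errorTypeAttr(err error) attribute.KeyValue {
	var opErr *net.OpError
	switch {
	case errors.Is(err, context.DeadlineExceeded):
		return semconv.ErrorTypeKey.String("timeout")
	case errors.As(err, &opErr):
		return semconv.ErrorTypeKey.String("net.OpError")
	default:
		return semconv.ErrorTypeOther // the _OTHER fallback defined above
	}
}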
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message.
+func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions.
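The known-methods rule above lends itself to a small helper; a sketch under the same import assumptions, using the method enum values and constructors defined a little further down in this file. The map is deliberately truncated here for brevity.

package main

import (
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed import path
)

// requestMethodAttrs applies the known-methods rule: an exact, case-sensitive
// match uses the enum; a case-insensitive match is canonicalized and also
// records the original spelling; anything else becomes _OTHER.
func requestMethodAttrs(raw string) []attribute.KeyValue {
	known := map[string]attribute.KeyValue{
		"GET":  semconv.HTTPRequestMethodGet,
		"POST": semconv.HTTPRequestMethodPost,
		"HEAD": semconv.HTTPRequestMethodHead,
		// ...the remaining RFC 9110 methods plus PATCH, elided here
	}
	if kv, ok := known[raw]; ok {
		return []attribute.KeyValue{kv}
	}
	if kv, ok := known[strings.ToUpper(raw)]; ok {
		// Canonicalized; the original value MUST also be recorded.
		return []attribute.KeyValue{kv, semconv.HTTPRequestMethodOriginal(raw)}
	}
	return []attribute.KeyValue{
		semconv.HTTPRequestMethodOther,
		semconv.HTTPRequestMethodOriginal(raw),
	}
}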
It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+	// CONNECT method
+	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+	// DELETE method
+	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+	// GET method
+	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+	// HEAD method
+	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+	// OPTIONS method
+	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+	// PATCH method
+	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+	// POST method
+	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+	// PUT method
+	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+	// TRACE method
+	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+	// Any HTTP method that the instrumentation has no prior knowledge of
+	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+	return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+	return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including
+// redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+	return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+	return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
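As a usage sketch (illustrative only, not vendored code): the http.request.method note above requires normalizing unknown methods to `_OTHER` while preserving the original spelling. A hypothetical server-side helper, assuming the package is imported as `semconv` (the version path is an assumption):

package example

import (
	"net/http"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

// knownMethods is the default RFC 9110 + PATCH set named in the note above.
var knownMethods = map[string]attribute.KeyValue{
	http.MethodConnect: semconv.HTTPRequestMethodConnect,
	http.MethodDelete:  semconv.HTTPRequestMethodDelete,
	http.MethodGet:     semconv.HTTPRequestMethodGet,
	http.MethodHead:    semconv.HTTPRequestMethodHead,
	http.MethodOptions: semconv.HTTPRequestMethodOptions,
	http.MethodPatch:   semconv.HTTPRequestMethodPatch,
	http.MethodPost:    semconv.HTTPRequestMethodPost,
	http.MethodPut:     semconv.HTTPRequestMethodPut,
	http.MethodTrace:   semconv.HTTPRequestMethodTrace,
}

// requestAttrs maps a request and response status onto the http.* attributes:
// unknown methods become _OTHER, and the original value is preserved in
// http.request.method_original, as the convention requires.
func requestAttrs(r *http.Request, status int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.HTTPResponseStatusCode(status)}
	if kv, ok := knownMethods[r.Method]; ok {
		attrs = append(attrs, kv)
	} else {
		attrs = append(attrs,
			semconv.HTTPRequestMethodOther,
			semconv.HTTPRequestMethodOriginal(r.Method))
	}
	return attrs
}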
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client_id" semantic conventions. It represents a unique
+	// identifier for the client that consumes or produces a message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'client-5', 'myhost@8742@s8083jm'
+	MessagingClientIDKey = attribute.Key("messaging.client_id")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have an auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker doesn't have such a notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low cardinality representation of the messaging destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+	// to the "messaging.destination_publish.anonymous" semantic conventions.
+	// It represents a boolean that is true if the publish message destination
+	// is anonymous (could be unnamed or have an auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+	// MessagingDestinationPublishNameKey is the attribute Key conforming to
+	// the "messaging.destination_publish.name" semantic conventions. It
+	// represents the name of the original destination the message was
+	// published to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: The name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If
+	// the broker doesn't have such a notion, the original destination name
+	// SHOULD uniquely identify the broker.
+	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+	// It represents the ordering key for a given message. If the attribute is
+	// not present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ordering_key'
+	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka, which are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the
+	// size of the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1439
+	// Note: This can refer to either the compressed or uncompressed body size.
+	// If both sizes are known, the uncompressed
+	// body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the conversation ID identifying the conversation to which the message
+	// belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents
+	// the size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2738
+	// Note: This can refer to either the compressed or uncompressed size. If
+	// both sizes are known, the uncompressed
+	// size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingOperationKey is the attribute Key conforming to the
+	// "messaging.operation" semantic conventions. It represents a string
+	// identifying the kind of messaging operation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationKey = attribute.Key("messaging.operation")
+
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark a message besides the
+	// message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of a message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// independent of each other.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents an identifier for
+	// the messaging system being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+	// One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+	MessagingOperationPublish = MessagingOperationKey.String("publish")
+	// A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+	MessagingOperationCreate = MessagingOperationKey.String("create")
+	// One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumers register callbacks which get called by messaging SDKs
+	MessagingOperationDeliver = MessagingOperationKey.String("deliver")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Apache ActiveMQ
+	MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+	// Amazon Simple Queue Service (SQS)
+	MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+	// Azure Event Grid
+	MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
+	// Azure Event Hubs
+	MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
+	// Azure Service Bus
+	MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
+	// Google Cloud Pub/Sub
+	MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+	// Java Message Service
+	MessagingSystemJms = MessagingSystemKey.String("jms")
+	// Apache Kafka
+	MessagingSystemKafka = MessagingSystemKey.String("kafka")
+	// RabbitMQ
+	MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+	// Apache RocketMQ
+	MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+	return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+	return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have an auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published
+// to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+	return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`, the
+// attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+	return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the message
+// id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of a message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
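As a usage sketch (illustrative only, not vendored code): the messaging.* attributes above compose naturally on a publish span. A hypothetical helper for a Kafka producer, respecting the notes on messaging.batch.message_count and on null Kafka keys; the import path is an assumption:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

// publishAttrs describes a Kafka publish. Per the messaging.batch.message_count
// note, the batch attribute is only set when more than one message is involved.
func publishAttrs(topic, key string, partition, batchSize int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemKafka,
		semconv.MessagingOperationPublish,
		semconv.MessagingDestinationName(topic),
		semconv.MessagingKafkaDestinationPartition(partition),
	}
	if key != "" { // a null key MUST NOT be set, per messaging.kafka.message.key
		attrs = append(attrs, semconv.MessagingKafkaMessageKey(key))
	}
	if batchSize > 1 { // SHOULD NOT be set on single-message operations
		attrs = append(attrs, semconv.MessagingBatchMessageCount(batchSize))
	}
	return attrs
}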
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details about
+	// a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+	// NetworkProtocolVersionKey is the attribute Key conforming to the
+	// "network.protocol.version" semantic conventions. It represents the
+	// version of the protocol specified in `network.protocol.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3.1.1'
+	// Note: `network.protocol.version` refers to the version of the protocol
+	// used and might be different from the protocol client's version. If the
+	// HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`,
+	// this attribute should be set to `1.1`.
+	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+	// NetworkTransportKey is the attribute Key conforming to the
+	// "network.transport" semantic conventions. It represents the [OSI
+	// transport layer](https://osi-model.com/transport-layer/) or
+	// [inter-process communication
+	// method](https://wikipedia.org/wiki/Inter-process_communication).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tcp', 'udp'
+	// Note: The value SHOULD be normalized to lowercase.
+	//
+	// Consider always setting the transport when setting a port number, since
+	// a port number is ambiguous without knowing the transport. For example
+	// different processes could be listening on TCP port 12345 and UDP port
+	// 12345.
+	NetworkTransportKey = attribute.Key("network.transport")
+
+	// NetworkTypeKey is the attribute Key conforming to the "network.type"
+	// semantic conventions. It represents the [OSI network
+	// layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ipv4', 'ipv6'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+	// GPRS
+	NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+	// wifi
+	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+	// wired
+	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+	// cell
+	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+	// unavailable
+	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+	// unknown
+	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+	// transmit
+	NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+	// receive
+	NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+	return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+	return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+	return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+	return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the protocol specified in `network.protocol.name`.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
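As a usage sketch (illustrative only, not vendored code): the network.* attributes above map directly onto a net.Conn. A hypothetical helper that always pairs ports with a transport, as the network.transport note advises; the import path is an assumption:

package example

import (
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

// connAttrs records the transport plus local and peer endpoints of c. The
// transport is set whenever ports are, since a bare port number is ambiguous.
func connAttrs(c net.Conn) []attribute.KeyValue {
	var attrs []attribute.KeyValue
	switch c.LocalAddr().Network() {
	case "tcp", "tcp4", "tcp6":
		attrs = append(attrs, semconv.NetworkTransportTCP)
	case "udp", "udp4", "udp6":
		attrs = append(attrs, semconv.NetworkTransportUDP)
	case "unix":
		attrs = append(attrs, semconv.NetworkTransportUnix)
	}
	if host, port, err := net.SplitHostPort(c.LocalAddr().String()); err == nil {
		attrs = append(attrs, semconv.NetworkLocalAddress(host))
		if p, err := strconv.Atoi(port); err == nil {
			attrs = append(attrs, semconv.NetworkLocalPort(p))
		}
	}
	if host, port, err := net.SplitHostPort(c.RemoteAddr().String()); err == nil {
		attrs = append(attrs, semconv.NetworkPeerAddress(host))
		if p, err := strconv.Atoi(port); err == nil {
			attrs = append(attrs, semconv.NetworkPeerPort(p))
		}
	}
	return attrs
}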
+
+// Attributes for remote procedure calls.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of request or response. Since the protocol allows id to be int,
+	// string, `null` or missing (for notifications), the value is expected to
+	// be cast to string for simplicity. Use empty string in case of `null`
+	// value. Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in the `jsonrpc` property of request/response. Since JSON-RPC
+	// 1.0 doesn't specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called; it must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+	// cancelled
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+	// Connect RPC
+	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows id to be int,
+// string, `null` or missing (for notifications), the value is expected to be
+// cast to string for simplicity. Use empty string in case of `null` value.
+// Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in the `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
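As a usage sketch (illustrative only, not vendored code): the rpc.* attributes above compose on a gRPC client span by splitting the full method name, e.g. "/myservice.EchoService/Echo", into rpc.service and rpc.method. The import path and helper name are assumptions:

package example

import (
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

// grpcAttrs derives rpc.system, rpc.service, rpc.method, and the numeric
// rpc.grpc.status_code from a gRPC full method name and status code.
func grpcAttrs(fullMethod string, code int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.RPCSystemGRPC}
	// gRPC full method names have the form "/package.Service/Method".
	if name := strings.TrimPrefix(fullMethod, "/"); name != "" {
		if svc, method, ok := strings.Cut(name, "/"); ok {
			attrs = append(attrs, semconv.RPCService(svc), semconv.RPCMethod(method))
		}
	}
	// The enum members above cover the well-known codes; for a dynamic code,
	// the key's Int method can be used directly.
	attrs = append(attrs, semconv.RPCGRPCStatusCodeKey.Int(code))
	return attrs
}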
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ServerAddressKey is the attribute Key conforming to the "server.address"
+	// semantic conventions. It represents the server domain name if available
+	// without reverse DNS lookup; otherwise, IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.address` SHOULD represent the server address
+	// behind any intermediaries, for example proxies, if it's available.
+	ServerAddressKey = attribute.Key("server.address")
+
+	// ServerPortKey is the attribute Key conforming to the "server.port"
+	// semantic conventions. It represents the server port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.port` SHOULD represent the server port behind
+	// any intermediaries, for example proxies, if it's available.
+	ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+	return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+	return ServerPortKey.Int(val)
+}
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// SourceAddressKey is the attribute Key conforming to the "source.address"
+	// semantic conventions. It represents the source address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the destination side, and when communicating
+	// through an intermediary, `source.address` SHOULD represent the source
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	SourceAddressKey = attribute.Key("source.address")
+
+	// SourcePortKey is the attribute Key conforming to the "source.port"
+	// semantic conventions. It represents the source port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+	return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+	return SourcePortKey.Int(val)
+}
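As a usage sketch (illustrative only, not vendored code): on the client side, server.address/server.port are usually derived from the dial target rather than a resolved address, matching the "no reverse DNS lookup" wording above. The import path and helper name are assumptions:

package example

import (
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

// serverAttrs splits a dial target such as "example.com:443" into
// server.address and server.port without performing any DNS resolution.
func serverAttrs(target string) []attribute.KeyValue {
	host, port, err := net.SplitHostPort(target)
	if err != nil {
		// No port in the target; record the address as given.
		return []attribute.KeyValue{semconv.ServerAddress(target)}
	}
	attrs := []attribute.KeyValue{semconv.ServerAddress(host)}
	if p, err := strconv.Atoi(port); err == nil {
		attrs = append(attrs, semconv.ServerPort(p))
	}
	return attrs
}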
+
+// Semantic convention attributes in the TLS namespace.
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+	// semantic conventions. It represents the string indicating the
+	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+	// used during the current connection.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+	// Note: The values allowed for `tls.cipher` MUST be one of the
+	// `Descriptions` of the [registered TLS Cipher
+	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the client. This is
+	// usually mutually-exclusive of `client.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the client. This is usually mutually-exclusive of
+	// `client.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+	// TLSClientHashMd5Key is the attribute Key conforming to the
+	// "tls.client.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of the DER-encoded version
+	// of the certificate offered by the client. For consistency with other
+	// hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+	// TLSClientHashSha1Key is the attribute Key conforming to the
+	// "tls.client.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of the DER-encoded version
+	// of the certificate offered by the client. For consistency with other
+	// hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+	// TLSClientHashSha256Key is the attribute Key conforming to the
+	// "tls.client.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of the DER-encoded
+	// version of the certificate offered by the client. For consistency with
+	// other hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+	// TLSClientIssuerKey is the attribute Key conforming to the
+	// "tls.client.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+	// TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+	// semantic conventions. It represents a hash that identifies clients based
+	// on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+	// TLSClientNotAfterKey is the attribute Key conforming to the
+	// "tls.client.not_after" semantic conventions. It represents the date/time
+	// indicating when the client certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+	// TLSClientNotBeforeKey is the attribute Key conforming to the
+	// "tls.client.not_before" semantic conventions. It represents the
+	// date/time indicating when the client certificate is first considered
+	// valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+	// TLSClientServerNameKey is the attribute Key conforming to the
+	// "tls.client.server_name" semantic conventions. It represents the server
+	// name, also called an SNI, which tells the server the hostname to which
+	// the client is attempting to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry.io'
+	TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+	// TLSClientSubjectKey is the attribute Key conforming to the
+	// "tls.client.subject" semantic conventions. It represents the
+	// distinguished name of the subject of the x.509 certificate presented by
+	// the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+	TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+	// TLSClientSupportedCiphersKey is the attribute Key conforming to the
+	// "tls.client.supported_ciphers" semantic conventions. It represents the
+	// array of ciphers offered by the client during the client hello.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+	TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+	// TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+	// conventions. It represents the string indicating the curve used for the
+	// given cipher, when applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'secp256r1'
+	TLSCurveKey = attribute.Key("tls.curve")
+
+	// TLSEstablishedKey is the attribute Key conforming to the
+	// "tls.established" semantic conventions. It represents the boolean flag
+	// indicating if the TLS negotiation was successful and transitioned to an
+	// encrypted tunnel.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSEstablishedKey = attribute.Key("tls.established")
+
+	// TLSNextProtocolKey is the attribute Key conforming to the
+	// "tls.next_protocol" semantic conventions. It represents the string
+	// indicating the protocol being tunneled. Per the values in the [IANA
+	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+	// this string should be lower case.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'http/1.1'
+	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+	// TLSProtocolNameKey is the attribute Key conforming to the
+	// "tls.protocol.name" semantic conventions. It represents the normalized
+	// lowercase protocol name parsed from the original string of the
+	// negotiated [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+	// TLSProtocolVersionKey is the attribute Key conforming to the
+	// "tls.protocol.version" semantic conventions. It represents the numeric
+	// part of the version parsed from the original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2', '3'
+	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+	// semantic conventions. It represents the boolean flag indicating if this
+	// TLS connection was resumed from an existing TLS negotiation.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSResumedKey = attribute.Key("tls.resumed")
+
+	// TLSServerCertificateKey is the attribute Key conforming to the
+	// "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+	// usually mutually-exclusive of `server.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the server. This is usually mutually-exclusive of
+	// `server.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of the DER-encoded version
+	// of the certificate offered by the server. For consistency with other
+	// hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of the DER-encoded version
+	// of the certificate offered by the server. For consistency with other
+	// hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of the DER-encoded
+	// version of the certificate offered by the server. For consistency with
+	// other hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the
+	// "tls.server.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the
+	// "tls.server.ja3s" semantic conventions. It represents a hash that
+	// identifies servers based on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/time
+	// indicating when the server certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+	// TLSServerNotBeforeKey is the attribute Key conforming to the
+	// "tls.server.not_before" semantic conventions. It represents the
+	// date/time indicating when the server certificate is first considered
+	// valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+	// TLSServerSubjectKey is the attribute Key conforming to the
+	// "tls.server.subject" semantic conventions. It represents the
+	// distinguished name of the subject of the x.509 certificate presented by
+	// the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+	TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+	// ssl
+	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+	// tls
+	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+	return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+	return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+	return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+	return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+	return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+	return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+	return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+	return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+	return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+	return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name, also called an SNI, which tells the server the hostname to which the
+// client is attempting to connect.
+func TLSClientServerName(val string) attribute.KeyValue {
+	return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+	return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+	return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+	return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+	return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+	return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
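+
+// Illustrative usage (editor's sketch): several of these TLS attributes line
+// up with fields of crypto/tls.ConnectionState, so an instrumented server
+// might record them as follows. Assumes `cs` is a tls.ConnectionState and
+// `span` is an active trace.Span:
+//
+//	span.SetAttributes(
+//		semconv.TLSEstablished(cs.HandshakeComplete),
+//		semconv.TLSResumed(cs.DidResume),
+//		semconv.TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
+//		semconv.TLSNextProtocol(cs.NegotiatedProtocol),
+//		semconv.TLSClientServerName(cs.ServerName),
+//	)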
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+	return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+	return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+	return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case username and
+	// password SHOULD be redacted and attribute's value SHOULD be
+	// `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed) and SHOULD NOT be validated or modified except for
+	// sanitizing purposes.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	URLPathKey = attribute.Key("url.path")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in query string SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+)
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
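+
+// Illustrative usage (editor's sketch): on an HTTP server these URL and
+// user-agent attributes are usually filled straight from the incoming
+// request. Assumes `req` is an *http.Request and `span` is an active
+// trace.Span:
+//
+//	span.SetAttributes(
+//		semconv.URLPath(req.URL.Path),
+//		semconv.URLQuery(req.URL.RawQuery),
+//		semconv.URLScheme("http"), // or "https" when req.TLS != nil
+//		semconv.UserAgentOriginal(req.UserAgent()),
+//	)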
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 000000000..d27e8a8f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 000000000..6c019aafc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+	// IosStateKey is the attribute Key conforming to the "ios.state" semantic
+	// conventions. It represents the state the application has transitioned
+	// into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+	// documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+	// and from which the `OS terminology` column values are derived.
+	IosStateKey = attribute.Key("ios.state")
+)
+
+var (
+	// The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+	IosStateActive = IosStateKey.String("active")
+	// The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+	IosStateInactive = IosStateKey.String("inactive")
+	// The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+	IosStateBackground = IosStateKey.String("background")
+	// The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+	IosStateForeground = IosStateKey.String("foreground")
+	// The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+	IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It represents the state the application has
+	// transitioned into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// and from which the `OS identifiers` are derived.
+	AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+	// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+	AndroidStateCreated = AndroidStateKey.String("created")
+	// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+	AndroidStateBackground = AndroidStateKey.String("background")
+	// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+	AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version of
+	// the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. The string representation of the
+	// value should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
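+
+// Illustrative usage (editor's sketch): a feature-flag evaluation is
+// represented as a span event carrying these attributes. Assumes an active
+// trace.Span `span` and the go.opentelemetry.io/otel/trace package; the key,
+// provider, and variant values are the examples from the convention:
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		semconv.FeatureFlagKey("logo-color"),
+//		semconv.FeatureFlagProviderName("Flag Manager"),
+//		semconv.FeatureFlagVariant("red"),
+//	))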
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
new file mode 100644
index 000000000..7235bb51d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
new file mode 100644
index 000000000..a6b953f62
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
@@ -0,0 +1,1071 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+
+	// DBClientConnectionsUsage is the metric conforming to the
+	// "db.client.connections.usage" semantic conventions. It represents the number
+	// of connections that are currently in state described by the `state`
+	// attribute.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsUsageName = "db.client.connections.usage"
+	DBClientConnectionsUsageUnit = "{connection}"
+	DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+	// DBClientConnectionsIdleMax is the metric conforming to the
+	// "db.client.connections.idle.max" semantic conventions. It represents the
+	// maximum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+	DBClientConnectionsIdleMaxUnit = "{connection}"
+	DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+	// DBClientConnectionsIdleMin is the metric conforming to the
+	// "db.client.connections.idle.min" semantic conventions. It represents the
+	// minimum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+	DBClientConnectionsIdleMinUnit = "{connection}"
+	DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed"
+
+	// DBClientConnectionsMax is the metric conforming to the
+	// "db.client.connections.max" semantic conventions. It represents the maximum
+	// number of open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsMaxName = "db.client.connections.max"
+	DBClientConnectionsMaxUnit = "{connection}"
+	DBClientConnectionsMaxDescription = "The maximum number of open connections allowed"
+
+	// DBClientConnectionsPendingRequests is the metric conforming to the
+	// "db.client.connections.pending_requests" semantic conventions. It represents
+	// the number of pending requests for an open connection, cumulative for the
+	// entire pool.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+	DBClientConnectionsPendingRequestsUnit = "{request}"
+	DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+	// DBClientConnectionsTimeouts is the metric conforming to the
+	// "db.client.connections.timeouts" semantic conventions. It represents the
+	// number of connection timeouts that have occurred trying to obtain a
+	// connection from the pool.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+	DBClientConnectionsTimeoutsUnit = "{timeout}"
+	DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+	// DBClientConnectionsCreateTime is the metric conforming to the
+	// "db.client.connections.create_time" semantic conventions. It represents the
+	// time it took to create a new connection.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+	DBClientConnectionsCreateTimeUnit = "ms"
+	DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection"
+
+	// DBClientConnectionsWaitTime is the metric conforming to the
+	// "db.client.connections.wait_time" semantic conventions. It represents the
+	// time it took to obtain an open connection from the pool.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+	DBClientConnectionsWaitTimeUnit = "ms"
+	DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+	// DBClientConnectionsUseTime is the metric conforming to the
+	// "db.client.connections.use_time" semantic conventions. It represents the
+	// time between borrowing a connection and returning it to the pool.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+	DBClientConnectionsUseTimeUnit = "ms"
+	DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+	// AspnetcoreRoutingMatchAttempts is the metric conforming to the
+	// "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+	// number of requests that were attempted to be matched to an endpoint.
+	// Instrument: counter
+	// Unit: {match_attempt}
+	// Stability: Experimental
+	AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+	AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+	AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+	// AspnetcoreDiagnosticsExceptions is the metric conforming to the
+	// "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+	// number of exceptions caught by exception handling middleware.
+	// Instrument: counter
+	// Unit: {exception}
+	// Stability: Experimental
+	AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+	AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+	AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
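+
+	// Illustrative usage (editor's sketch, not part of the generated
+	// constants): each Name/Unit/Description triple above is intended to be
+	// passed through when creating the corresponding instrument, e.g. with a
+	// go.opentelemetry.io/otel/metric Meter:
+	//
+	//	timeouts, _ := meter.Int64Counter(
+	//		DBClientConnectionsTimeoutsName,
+	//		metric.WithUnit(DBClientConnectionsTimeoutsUnit),
+	//		metric.WithDescription(DBClientConnectionsTimeoutsDescription),
+	//	)
+	//	timeouts.Add(ctx, 1)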
+
+	// AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+	// "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+	// represents the number of requests that are currently active on the server
+	// that hold a rate limiting lease.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+	AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+	AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+	// AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+	// "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+	// represents the duration of rate limiting lease held by requests on the
+	// server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+	AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+	AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+	// AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+	// "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+	// represents the time the request spent in a queue waiting to acquire a rate
+	// limiting lease.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+	AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+	AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+	// AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+	// "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+	// represents the number of requests that are currently queued, waiting to
+	// acquire a rate limiting lease.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+	AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+	AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+	// AspnetcoreRateLimitingRequests is the metric conforming to the
+	// "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+	// number of requests that tried to acquire a rate limiting lease.
+	// Instrument: counter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+	AspnetcoreRateLimitingRequestsUnit = "{request}"
+	AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+	// DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+	// semantic conventions. It measures the time taken to perform a DNS lookup.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DNSLookupDurationName = "dns.lookup.duration"
+	DNSLookupDurationUnit = "s"
+	DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+	// HTTPClientOpenConnections is the metric conforming to the
+	// "http.client.open_connections" semantic conventions. It represents the
+	// number of outbound HTTP connections that are currently active or idle on the
+	// client.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	HTTPClientOpenConnectionsName = "http.client.open_connections"
+	HTTPClientOpenConnectionsUnit = "{connection}"
+	HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+	// HTTPClientConnectionDuration is the metric conforming to the
+	// "http.client.connection.duration" semantic conventions. It represents the
+	// duration of the successfully established outbound HTTP connections.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	HTTPClientConnectionDurationName = "http.client.connection.duration"
+	HTTPClientConnectionDurationUnit = "s"
+	HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+	// HTTPClientActiveRequests is the metric conforming to the
+	// "http.client.active_requests" semantic conventions. It represents the number
+	// of active HTTP requests.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	HTTPClientActiveRequestsName = "http.client.active_requests"
+	HTTPClientActiveRequestsUnit = "{request}"
+	HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+	// HTTPClientRequestTimeInQueue is the metric conforming to the
+	// "http.client.request.time_in_queue" semantic conventions. It represents the
+	// amount of time requests spent on a queue waiting for an available
+	// connection.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue"
+	HTTPClientRequestTimeInQueueUnit = "s"
+	HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection."
+
+	// KestrelActiveConnections is the metric conforming to the
+	// "kestrel.active_connections" semantic conventions. It represents the number
+	// of connections that are currently active on the server.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelActiveConnectionsName = "kestrel.active_connections"
+	KestrelActiveConnectionsUnit = "{connection}"
+	KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+	// KestrelConnectionDuration is the metric conforming to the
+	// "kestrel.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelConnectionDurationName = "kestrel.connection.duration"
+	KestrelConnectionDurationUnit = "s"
+	KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+	// KestrelRejectedConnections is the metric conforming to the
+	// "kestrel.rejected_connections" semantic conventions. It represents the
+	// number of connections rejected by the server.
+	// Instrument: counter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+	KestrelRejectedConnectionsUnit = "{connection}"
+	KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+	// KestrelQueuedConnections is the metric conforming to the
+	// "kestrel.queued_connections" semantic conventions. It represents the number
+	// of connections that are currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelQueuedConnectionsName = "kestrel.queued_connections"
+	KestrelQueuedConnectionsUnit = "{connection}"
+	KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+	// KestrelQueuedRequests is the metric conforming to the
+	// "kestrel.queued_requests" semantic conventions. It represents the number of
+	// HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+	// currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	KestrelQueuedRequestsName = "kestrel.queued_requests"
+	KestrelQueuedRequestsUnit = "{request}"
+	KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+	// KestrelUpgradedConnections is the metric conforming to the
+	// "kestrel.upgraded_connections" semantic conventions. It represents the
+	// number of connections that are currently upgraded (WebSockets).
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+	KestrelUpgradedConnectionsUnit = "{connection}"
+	KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+	// KestrelTLSHandshakeDuration is the metric conforming to the
+	// "kestrel.tls_handshake.duration" semantic conventions. It represents the
+	// duration of TLS handshakes on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+	KestrelTLSHandshakeDurationUnit = "s"
+	KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+	// KestrelActiveTLSHandshakes is the metric conforming to the
+	// "kestrel.active_tls_handshakes" semantic conventions. It represents the
+	// number of TLS handshakes that are currently in progress on the server.
+	// Instrument: updowncounter
+	// Unit: {handshake}
+	// Stability: Experimental
+	KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+	KestrelActiveTLSHandshakesUnit = "{handshake}"
+	KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+	// SignalrServerConnectionDuration is the metric conforming to the
+	// "signalr.server.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+	SignalrServerConnectionDurationUnit = "s"
+	SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It represents the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It represents the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter
+ // Unit: {coldstart}
+ // Stability: Experimental
+ FaaSColdstartsName = "faas.coldstarts"
+ FaaSColdstartsUnit = "{coldstart}"
+ FaaSColdstartsDescription = "Number of invocation cold starts"
+
+ // FaaSErrors is the metric conforming to the "faas.errors" semantic
+ // conventions. It represents the number of invocation errors.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ FaaSErrorsName = "faas.errors"
+ FaaSErrorsUnit = "{error}"
+ FaaSErrorsDescription = "Number of invocation errors"
+
+ // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+ // conventions. It represents the number of successful invocations.
+ // Instrument: counter
+ // Unit: {invocation}
+ // Stability: Experimental
+ FaaSInvocationsName = "faas.invocations"
+ FaaSInvocationsUnit = "{invocation}"
+ FaaSInvocationsDescription = "Number of successful invocations"
+
+ // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+ // conventions. It represents the number of invocation timeouts.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ FaaSTimeoutsName = "faas.timeouts"
+ FaaSTimeoutsUnit = "{timeout}"
+ FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+ // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+ // conventions. It represents the distribution of max memory usage per
+ // invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSMemUsageName = "faas.mem_usage"
+ FaaSMemUsageUnit = "By"
+ FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+ // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+ // conventions. It represents the distribution of CPU usage per invocation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSCPUUsageName = "faas.cpu_usage"
+ FaaSCPUUsageUnit = "s"
+ FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+ // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+ // conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." 
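The JVM memory metrics above are updowncounters that are usually observed asynchronously. A sketch of wiring one of them to an observable instrument, assuming the same meter setup as earlier; readHeapUsed is a hypothetical sampler, not part of this diff:

    // Observe jvm.memory.used via a callback instead of synchronous Add calls.
    _, err := meter.Int64ObservableUpDownCounter(
        semconv.JvmMemoryUsedName,
        metric.WithUnit(semconv.JvmMemoryUsedUnit),
        metric.WithDescription(semconv.JvmMemoryUsedDescription),
        metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error {
            o.Observe(readHeapUsed()) // readHeapUsed is a hypothetical helper
            return nil
        }),
    )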
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+ // duration of the publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+ // duration of the receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingDeliverDuration is the metric conforming to the
+ // "messaging.deliver.duration" semantic conventions. It represents the
+ // duration of the deliver operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingDeliverDurationName = "messaging.deliver.duration"
+ MessagingDeliverDurationUnit = "s"
+ MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+ // number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+ // number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingDeliverMessages is the metric conforming to the
+ // "messaging.deliver.messages" semantic conventions. It represents the
+ // number of delivered messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingDeliverMessagesName = "messaging.deliver.messages"
+ MessagingDeliverMessagesUnit = "{message}"
+ MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It represents the duration of inbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It represents the duration of outbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It represents the current frequency of the
+ // CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It represents
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It represents
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It represents memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
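The system.* gauges follow the same pattern with float callbacks; for instance, system.cpu.utilization reports a 0-to-1 ratio, so a sketch might look like this (sampleCPUUtilization is a hypothetical sampler, not part of this diff):

    _, err := meter.Float64ObservableGauge(
        semconv.SystemCPUUtilizationName,
        metric.WithUnit(semconv.SystemCPUUtilizationUnit),
        metric.WithDescription(semconv.SystemCPUUtilizationDescription),
        metric.WithFloat64Callback(func(ctx context.Context, o metric.Float64Observer) error {
            o.Observe(sampleCPUUtilization())
            return nil
        }),
    )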
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or Windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessesCount is the metric conforming to the
+ // "system.processes.count" semantic conventions. It represents the total
+ // number of processes in each state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCountName = "system.processes.count"
+ SystemProcessesCountUnit = "{process}"
+ SystemProcessesCountDescription = "Total number of processes in each state"
+
+ // SystemProcessesCreated is the metric conforming to the
+ // "system.processes.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCreatedName = "system.processes.created"
+ SystemProcessesCreatedUnit = "{process}"
+ SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
new file mode 100644
index 000000000..d66bbe9c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
@@ -0,0 +1,2545 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. Cloud regions often have
+ // multiple, isolated locations known as zones to increase availability;
+ // this attribute represents the zone where the resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability; this
+// attribute represents the zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `` section of the full name for example from
+ // `registry.example.com/my-org/my-image:`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `` section of the full name for example from
+// `registry.example.com/my-org/my-image:`.
+func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. 
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val int) attribute.KeyValue { + return HostCPUSteppingKey.Int(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. 
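+// +// Illustrative usage, a minimal sketch assuming go.opentelemetry.io/otel/sdk/resource +// and the package-level SchemaURL constant: host attributes such as this one +// are typically bundled into a single resource, e.g. +// +//	res := resource.NewWithAttributes(SchemaURL, +//		HostName("opentelemetry-test"), +//		HostArchAMD64, +//		HostCPUVendorID("GenuineIntel"), +//	)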
+func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID.
If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
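+// +// A minimal sketch (assuming go.opentelemetry.io/otel/sdk/resource and the +// package-level SchemaURL constant) of combining the Kubernetes attributes +// above, reusing the documented example values: +// +//	res := resource.NewWithAttributes(SchemaURL, +//		K8SNamespaceName("default"), +//		K8SPodName("opentelemetry-pod-autoconf"), +//		K8SDaemonSetUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"), +//	)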
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. 
It represents the digest of + // the OCI image manifest. For container images specifically, it is the + // digest by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically, it is the digest by +// which the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, e.g. as reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, e.g. as reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions.
It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID).
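+// +// A minimal sketch (assuming go.opentelemetry.io/otel/sdk/resource, the +// package-level SchemaURL constant, and the standard library os package) of +// populating process attributes at startup: +// +//	res := resource.NewWithAttributes(SchemaURL, +//		ProcessPID(os.Getpid()), +//		ProcessParentPID(os.Getppid()), +//		ProcessCommandArgs(os.Args...), +//	)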
+func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the Android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the Android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions.
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
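+// +// A minimal sketch (assuming go.opentelemetry.io/otel/sdk/resource and the +// package-level SchemaURL constant) of attaching the ECS attributes above, +// reusing the documented example values: +// +//	res := resource.NewWithAttributes(SchemaURL, +//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"), +//		AWSECSLaunchtypeFargate, +//		AWSECSTaskFamily("opentelemetry-family"), +//	)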
+func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. 
It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. 
It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// A serverless instance. +const ( + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. 
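+//
+// A hedged sketch of the conversion described just below: AWS Lambda's
+// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` reports MiB, so the value is multiplied
+// by 1,048,576 to obtain bytes. The surrounding code is illustrative and
+// assumes `os` and `strconv` are imported.
+//
+//	if mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+//		kv := semconv.FaaSMaxMemory(mib * 1_048_576)
+//		_ = kv // attach to the function's resource
+//	}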
On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes.
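+//
+// A hedged sketch (illustrative): the Azure form described above is the
+// function app name, a forward slash, then the function name.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.FaaSName("myazurefunctionapp/some-function-name"),
+//		semconv.FaaSVersion("26"),
+//	}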
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
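+//
+// A hedged sketch (illustrative, not part of the generated file): a resource
+// whose namespace + name + instance ID triplet is globally unique, per the
+// note above; resource.NewWithAttributes is the SDK resource constructor.
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.ServiceNamespace("Shop"),
+//		semconv.ServiceName("shoppingcart"),
+//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//		semconv.ServiceVersion("2.0.0"),
+//	)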
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
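+//
+// A hedged note (an assumption about typical SDK behavior, not stated here):
+// applications normally do not set these attributes by hand; the Go SDK's
+// default resource already carries them.
+//
+//	res := resource.Default() // telemetry.sdk.language=go, telemetry.sdk.name=opentelemetry, ...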
+ // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. 
+func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
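+//
+// A hedged sketch (illustrative): how a non-OTLP exporter might flatten an
+// instrumentation scope into these attributes.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.OTelScopeName("io.opentelemetry.contrib.mongodb"),
+//		semconv.OTelScopeVersion("1.0.0"),
+//	}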
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + // Deprecated: use the `otel.scope.name` attribute. + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + // Deprecated: use the `otel.scope.version` attribute. + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. +// +// Deprecated: use the `otel.scope.name` attribute. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. +// +// Deprecated: use the `otel.scope.version` attribute. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go new file mode 100644 index 000000000..fe80b1731 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go new file mode 100644 index 000000000..c1718234e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go @@ -0,0 +1,1323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions.
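+//
+// A hedged sketch (illustrative): setting peer.service at span start; tracer
+// and ctx are assumed to exist.
+//
+//	ctx, span := tracer.Start(ctx, "GetAuthToken",
+//		trace.WithAttributes(semconv.PeerService("AuthTokenCache")))
+//	defer span.End()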
It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. 
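+//
+// A hedged sketch (illustrative): recording the authenticated end user on a
+// server span; span is assumed to exist and the values are the documented
+// examples.
+//
+//	span.SetAttributes(
+//		semconv.EnduserID("username"),
+//		semconv.EnduserRole("admin"),
+//		semconv.EnduserScope("read:message, write:files"),
+//	)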
It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. 
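+//
+// A hedged sketch (illustrative): runtime.Caller is one plausible source for
+// these values; note that FuncForPC returns a package-qualified name.
+//
+//	pc, file, line, _ := runtime.Caller(0)
+//	span.SetAttributes(
+//		semconv.CodeFunction(runtime.FuncForPC(pc).Name()),
+//		semconv.CodeFilepath(file),
+//		semconv.CodeLineNumber(line),
+//	)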
It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. 
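+//
+// A hedged sketch (illustrative): in a Go handler the invoked ARN can be read
+// from the aws-lambda-go lambdacontext package (an assumption about that
+// library's API); ctx and span are assumed to exist.
+//
+//	if lc, ok := lambdacontext.FromContext(ctx); ok {
+//		span.SetAttributes(semconv.AWSLambdaInvokedARN(lc.InvokedFunctionArn))
+//	}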
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. 
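+//
+// A hedged sketch (illustrative): annotating a processing span with the two
+// required CloudEvents attributes plus the event type.
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("/cloudevents/spec/pull/123"),
+//		semconv.CloudeventsEventType("com.github.pull_request.opened"),
+//	)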
+func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
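+//
+// A hedged sketch (illustrative): how a non-OTLP exporter might map a span
+// status, assuming a ReadOnlySpan `ro` and the otel `codes` package.
+//
+//	if s := ro.Status(); s.Code == codes.Error {
+//		attrs = append(attrs, semconv.OTelStatusCodeError,
+//			semconv.OTelStatusDescription(s.Description))
+//	}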
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It describes the + // type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
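+//
+// A hedged sketch (illustrative): attributes for a function triggered by an
+// object-storage write; the values are the documented examples.
+//
+//	span.SetAttributes(
+//		semconv.FaaSDocumentCollection("myBucketName"),
+//		semconv.FaaSDocumentName("myFile.txt"),
+//		semconv.FaaSDocumentOperationInsert,
+//		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//	)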
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") +) + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
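+//
+// A hedged sketch (illustrative): a timer-triggered invocation using the
+// documented example values.
+//
+//	span.SetAttributes(
+//		semconv.FaaSCron("0/5 * * * ? *"),
+//		semconv.FaaSTime("2020-01-23T13:47:06Z"),
+//	)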
+func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
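+//
+// A hedged sketch (illustrative): capturing the request ID from the response
+// headers named above; resp is an assumed *http.Response.
+//
+//	if id := resp.Header.Get("x-amz-request-id"); id != "" {
+//		span.SetAttributes(semconv.AWSRequestID(id))
+//	}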
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
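+//
+// A hedged sketch (illustrative): common attributes on a DynamoDB Query span;
+// the values are invented.
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBIndexName("name_to_group"),
+//		semconv.AWSDynamoDBConsistentRead(true),
+//		semconv.AWSDynamoDBLimit(10),
+//	)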
+ // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") +) + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go index caf7249de..6836c6547 100644 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 000000000..58ccaba69 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 3aadc66cf..273d58e00 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
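For reference, the DynamoDB, S3, and GraphQL helpers above are plain attribute.KeyValue constructors, so attaching them to a span is a single SetAttributes call. A minimal usage sketch, assuming the standard otel Tracer API; the semconv import path below is an assumption, since the version suffix of the vendored package is not visible in this hunk:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version suffix
)

func queryUsers(ctx context.Context) {
	// Tracer/Start/End are the stable otel API; the span name is arbitrary.
	_, span := otel.Tracer("example").Start(ctx, "DynamoDB.Query")
	defer span.End()

	// Each helper returns an attribute.KeyValue, so they compose with any
	// other attributes passed to SetAttributes.
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBScanForward(true),
	)
}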
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 76f9a083c..5650a174b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { - return noopSpan{} + return noopSpanInstance } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } - return noopSpan{} + return noopSpanInstance } // SpanContextFromContext returns the current Span's SpanContext. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 440f3d756..d661c5d10 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace provides an implementation of the tracing part of the diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 000000000..7754a239e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go index 898db5a75..3e359a00b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb8161..c00221e7b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c125491ca..ca20e9997 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } @@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. 
func (noopSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 26a4b2260..28877d4ab 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -361,6 +350,12 @@ type Span interface { // AddEvent adds an event with the provided name and options. AddEvent(name string, options ...EventOption) + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + // IsRecording returns the recording state of the Span. It will return // true if the Span is active and events can be recorded. IsRecording() bool diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index db936ba5b..20b5cf243 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh index dbb61a422..e57bf57fc 100644 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -1,18 +1,7 @@ #!/bin/bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
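The trace.go hunk above adds AddLink to the Span interface, with a no-op implementation on noopSpan. A minimal sketch of the case it covers, assuming a standard otel Tracer setup; links already known at creation time should keep going through trace.WithLinks at Start, since, as the new doc comment notes, head samplers only see creation-time data:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// process links the running span to a span context that only became known
// after the span started, which is the situation AddLink exists for.
func process(ctx context.Context, discovered trace.SpanContext) {
	_, span := otel.Tracer("example").Start(ctx, "process")
	defer span.End()

	span.AddLink(trace.Link{SpanContext: discovered})
}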
+# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh new file mode 100644 index 000000000..1e87855ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) + +missingReadme=false +for dir in $dirs; do + if [ ! -f "$dir/README.md" ]; then + echo "couldn't find README.md for $dir" + missingReadme=true + fi +done + +if [ "$missingReadme" = true ] ; then + echo "Error: some READMEs couldn't be found." + exit 1 +fi diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7b2993a1f..ab2896052 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.24.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 1b556e678..241cfc82a 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -1,20 +1,9 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0 module-sets: stable-v1: - version: v1.24.0 + version: v1.28.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -40,17 +29,21 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.46.0 + version: v0.50.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.0.1-alpha + version: v0.4.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.7 + version: v0.0.8 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index db42e6676..c709b7284 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego +//go:build (!arm64 && !s390x && !ppc64 && !ppc64le) || !gc || purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.go similarity index 89% rename from vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go rename to vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.go index 3a4287f99..bd183d9ba 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.s similarity index 76% rename from vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s rename to vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.s index c672ccf69..a660b4112 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.s @@ -19,7 +19,7 @@ // The differences in this and the original implementation are // due to the calling conventions and initialization of constants. 
-//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) #include "textflag.h" @@ -36,32 +36,68 @@ // for VPERMXOR #define MASK R18 -DATA consts<>+0x00(SB)/8, $0x3320646e61707865 -DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 -DATA consts<>+0x10(SB)/8, $0x0000000000000001 -DATA consts<>+0x18(SB)/8, $0x0000000000000000 -DATA consts<>+0x20(SB)/8, $0x0000000000000004 -DATA consts<>+0x28(SB)/8, $0x0000000000000000 -DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d -DATA consts<>+0x38(SB)/8, $0x0203000106070405 -DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c -DATA consts<>+0x48(SB)/8, $0x0102030005060704 -DATA consts<>+0x50(SB)/8, $0x6170786561707865 -DATA consts<>+0x58(SB)/8, $0x6170786561707865 -DATA consts<>+0x60(SB)/8, $0x3320646e3320646e -DATA consts<>+0x68(SB)/8, $0x3320646e3320646e -DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x90(SB)/8, $0x0000000100000000 -DATA consts<>+0x98(SB)/8, $0x0000000300000002 -DATA consts<>+0xa0(SB)/8, $0x5566774411223300 -DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 -DATA consts<>+0xb0(SB)/8, $0x6677445522330011 -DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +DATA consts<>+0x00(SB)/4, $0x61707865 +DATA consts<>+0x04(SB)/4, $0x3320646e +DATA consts<>+0x08(SB)/4, $0x79622d32 +DATA consts<>+0x0c(SB)/4, $0x6b206574 +DATA consts<>+0x10(SB)/4, $0x00000001 +DATA consts<>+0x14(SB)/4, $0x00000000 +DATA consts<>+0x18(SB)/4, $0x00000000 +DATA consts<>+0x1c(SB)/4, $0x00000000 +DATA consts<>+0x20(SB)/4, $0x00000004 +DATA consts<>+0x24(SB)/4, $0x00000000 +DATA consts<>+0x28(SB)/4, $0x00000000 +DATA consts<>+0x2c(SB)/4, $0x00000000 +DATA consts<>+0x30(SB)/4, $0x0e0f0c0d +DATA consts<>+0x34(SB)/4, $0x0a0b0809 +DATA consts<>+0x38(SB)/4, $0x06070405 +DATA consts<>+0x3c(SB)/4, $0x02030001 +DATA consts<>+0x40(SB)/4, $0x0d0e0f0c +DATA consts<>+0x44(SB)/4, $0x090a0b08 +DATA consts<>+0x48(SB)/4, $0x05060704 +DATA consts<>+0x4c(SB)/4, $0x01020300 +DATA consts<>+0x50(SB)/4, $0x61707865 +DATA consts<>+0x54(SB)/4, $0x61707865 +DATA consts<>+0x58(SB)/4, $0x61707865 +DATA consts<>+0x5c(SB)/4, $0x61707865 +DATA consts<>+0x60(SB)/4, $0x3320646e +DATA consts<>+0x64(SB)/4, $0x3320646e +DATA consts<>+0x68(SB)/4, $0x3320646e +DATA consts<>+0x6c(SB)/4, $0x3320646e +DATA consts<>+0x70(SB)/4, $0x79622d32 +DATA consts<>+0x74(SB)/4, $0x79622d32 +DATA consts<>+0x78(SB)/4, $0x79622d32 +DATA consts<>+0x7c(SB)/4, $0x79622d32 +DATA consts<>+0x80(SB)/4, $0x6b206574 +DATA consts<>+0x84(SB)/4, $0x6b206574 +DATA consts<>+0x88(SB)/4, $0x6b206574 +DATA consts<>+0x8c(SB)/4, $0x6b206574 +DATA consts<>+0x90(SB)/4, $0x00000000 +DATA consts<>+0x94(SB)/4, $0x00000001 +DATA consts<>+0x98(SB)/4, $0x00000002 +DATA consts<>+0x9c(SB)/4, $0x00000003 +DATA consts<>+0xa0(SB)/4, $0x11223300 +DATA consts<>+0xa4(SB)/4, $0x55667744 +DATA consts<>+0xa8(SB)/4, $0x99aabb88 +DATA consts<>+0xac(SB)/4, $0xddeeffcc +DATA consts<>+0xb0(SB)/4, $0x22330011 +DATA consts<>+0xb4(SB)/4, $0x66774455 +DATA consts<>+0xb8(SB)/4, $0xaabb8899 +DATA consts<>+0xbc(SB)/4, $0xeeffccdd GLOBL consts<>(SB), RODATA, $0xc0 +#ifdef GOARCH_ppc64 +#define BE_XXBRW_INIT() \ + LVSL (R0)(R0), V24 \ + VSPLTISB $3, V25 \ + VXOR V24, V25, V24 \ + +#define BE_XXBRW(vr) VPERM vr, vr, V24, vr +#else +#define BE_XXBRW_INIT() +#define BE_XXBRW(vr) +#endif + //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD out+0(FP), 
OUT @@ -94,6 +130,8 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // Clear V27 VXOR V27, V27, V27 + BE_XXBRW_INIT() + // V28 LXVW4X (CONSTBASE)(R11), VS60 @@ -299,6 +337,11 @@ loop_vsx: VADDUWM V8, V18, V8 VADDUWM V12, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -327,6 +370,11 @@ loop_vsx: VADDUWM V9, V18, V8 VADDUWM V13, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -334,8 +382,8 @@ loop_vsx: LXVW4X (INP)(R8), VS60 LXVW4X (INP)(R9), VS61 LXVW4X (INP)(R10), VS62 - VXOR V27, V0, V27 + VXOR V27, V0, V27 VXOR V28, V4, V28 VXOR V29, V8, V29 VXOR V30, V12, V30 @@ -354,6 +402,11 @@ loop_vsx: VADDUWM V10, V18, V8 VADDUWM V14, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -381,6 +434,11 @@ loop_vsx: VADDUWM V11, V18, V8 VADDUWM V15, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -408,9 +466,9 @@ loop_vsx: done_vsx: // Increment counter by number of 64 byte blocks - MOVD (CNT), R14 + MOVWZ (CNT), R14 ADD BLOCKS, R14 - MOVD R14, (CNT) + MOVWZ R14, (CNT) RET tail_vsx: diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index 333da285b..bd896bdc7 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego +//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go similarity index 95% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go index 4aec4874b..1a1679aaa 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s similarity index 89% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s index b3c1699bf..6899a1dab 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s @@ -2,15 +2,25 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) #include "textflag.h" // This was ported from the amd64 implementation. 
+#ifdef GOARCH_ppc64le +#define LE_MOVD MOVD +#define LE_MOVWZ MOVWZ +#define LE_MOVHZ MOVHZ +#else +#define LE_MOVD MOVDBR +#define LE_MOVWZ MOVWBR +#define LE_MOVHZ MOVHBR +#endif + #define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ + LE_MOVD (msg)( R0), t0; \ + LE_MOVD (msg)(R24), t1; \ MOVD $1, t2; \ ADDC t0, h0, h0; \ ADDE t1, h1, h1; \ @@ -50,10 +60,6 @@ ADDE t3, h1, h1; \ ADDZE h2 -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - // func update(state *[7]uint64, msg []byte) TEXT ·update(SB), $0-32 MOVD state+0(FP), R3 @@ -66,6 +72,8 @@ TEXT ·update(SB), $0-32 MOVD 24(R3), R11 // r0 MOVD 32(R3), R12 // r1 + MOVD $8, R24 + CMP R5, $16 BLT bytes_between_0_and_15 @@ -94,7 +102,7 @@ flush_buffer: // Greater than 8 -- load the rightmost remaining bytes in msg // and put into R17 (h1) - MOVD (R4)(R21), R17 + LE_MOVD (R4)(R21), R17 MOVD $16, R22 // Find the offset to those bytes @@ -118,7 +126,7 @@ just1: BLT less8 // Exactly 8 - MOVD (R4), R16 + LE_MOVD (R4), R16 CMP R17, $0 @@ -133,7 +141,7 @@ less8: MOVD $0, R22 // shift count CMP R5, $4 BLT less4 - MOVWZ (R4), R16 + LE_MOVWZ (R4), R16 ADD $4, R4 ADD $-4, R5 MOVD $32, R22 @@ -141,7 +149,7 @@ less8: less4: CMP R5, $2 BLT less2 - MOVHZ (R4), R21 + LE_MOVHZ (R4), R21 SLD R22, R21, R21 OR R16, R21, R16 ADD $16, R22 diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index 7e0230907..bbf391fe6 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -5,6 +5,10 @@ // Package sha3 implements the SHA-3 fixed-output-length hash functions and // the SHAKE variable-output-length hash functions defined by FIPS-202. // +// All types in this package also implement [encoding.BinaryMarshaler], +// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and +// unmarshal the internal state of the hash. +// // Both types of hash function use the "sponge" construction and the Keccak // permutation. For a detailed specification see http://keccak.noekeon.org/ // diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index c544b29e5..31fffbe04 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -48,33 +48,52 @@ func init() { crypto.RegisterHash(crypto.SHA3_512, New512) } +const ( + dsbyteSHA3 = 0b00000110 + dsbyteKeccak = 0b00000001 + dsbyteShake = 0b00011111 + dsbyteCShake = 0b00000100 + + // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in + // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. 
+ rateK256 = (1600 - 256) / 8 + rateK448 = (1600 - 448) / 8 + rateK512 = (1600 - 512) / 8 + rateK768 = (1600 - 768) / 8 + rateK1024 = (1600 - 1024) / 8 +) + func new224Generic() *state { - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} + return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3} } func new256Generic() *state { - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3} } func new384Generic() *state { - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} + return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3} } func new512Generic() *state { - return &state{rate: 72, outputLen: 64, dsbyte: 0x06} + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} } // NewLegacyKeccak256 creates a new Keccak-256 hash. // // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New256 instead. -func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } +func NewLegacyKeccak256() hash.Hash { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} +} // NewLegacyKeccak512 creates a new Keccak-512 hash. // // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New512 instead. -func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } +func NewLegacyKeccak512() hash.Hash { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} +} // Sum224 returns the SHA3-224 digest of the data. func Sum224(data []byte) (digest [28]byte) { diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index afedde5ab..6658c4447 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -4,6 +4,15 @@ package sha3 +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "unsafe" + + "golang.org/x/sys/cpu" +) + // spongeDirection indicates the direction bytes are flowing through the sponge. type spongeDirection int @@ -14,16 +23,13 @@ const ( spongeSqueezing ) -const ( - // maxRate is the maximum size of the internal buffer. SHAKE-256 - // currently needs the largest buffer. - maxRate = 168 -) - type state struct { - // Generic sponge components. - a [25]uint64 // main state of the hash - rate int // the number of bytes of state to use + a [1600 / 8]byte // main state of the hash + + // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR + // into before running the permutation. If squeezing, it's the remaining + // output to produce before running the permutation. + n, rate int // dsbyte contains the "domain separation" bits and the first bit of // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the @@ -39,10 +45,6 @@ type state struct { // Extendable-Output Functions (May 2014)" dsbyte byte - i, n int // storage[i:n] is the buffer, i is only used while squeezing - storage [maxRate]byte - - // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes state spongeDirection // whether the sponge is absorbing or squeezing } @@ -61,7 +63,7 @@ func (d *state) Reset() { d.a[i] = 0 } d.state = spongeAbsorbing - d.i, d.n = 0, 0 + d.n = 0 } func (d *state) clone() *state { @@ -69,22 +71,25 @@ func (d *state) clone() *state { return &ret } -// permute applies the KeccakF-1600 permutation. 
It handles -// any input-output buffering. +// permute applies the KeccakF-1600 permutation. func (d *state) permute() { - switch d.state { - case spongeAbsorbing: - // If we're absorbing, we need to xor the input into the state - // before applying the permutation. - xorIn(d, d.storage[:d.rate]) - d.n = 0 - keccakF1600(&d.a) - case spongeSqueezing: - // If we're squeezing, we need to apply the permutation before - // copying more output. - keccakF1600(&d.a) - d.i = 0 - copyOut(d, d.storage[:d.rate]) + var a *[25]uint64 + if cpu.IsBigEndian { + a = new([25]uint64) + for i := range a { + a[i] = binary.LittleEndian.Uint64(d.a[i*8:]) + } + } else { + a = (*[25]uint64)(unsafe.Pointer(&d.a)) + } + + keccakF1600(a) + d.n = 0 + + if cpu.IsBigEndian { + for i := range a { + binary.LittleEndian.PutUint64(d.a[i*8:], a[i]) + } } } @@ -92,53 +97,36 @@ func (d *state) permute() { // the multi-bitrate 10..1 padding rule, and permutes the state. func (d *state) padAndPermute() { // Pad with this instance's domain-separator bits. We know that there's - // at least one byte of space in d.buf because, if it were full, + // at least one byte of space in the sponge because, if it were full, // permute would have been called to empty it. dsbyte also contains the // first one bit for the padding. See the comment in the state struct. - d.storage[d.n] = d.dsbyte - d.n++ - for d.n < d.rate { - d.storage[d.n] = 0 - d.n++ - } + d.a[d.n] ^= d.dsbyte // This adds the final one bit for the padding. Because of the way that // bits are numbered from the LSB upwards, the final bit is the MSB of // the last byte. - d.storage[d.rate-1] ^= 0x80 + d.a[d.rate-1] ^= 0x80 // Apply the permutation d.permute() d.state = spongeSqueezing - d.n = d.rate - copyOut(d, d.storage[:d.rate]) } // Write absorbs more data into the hash's state. It panics if any // output has already been read. -func (d *state) Write(p []byte) (written int, err error) { +func (d *state) Write(p []byte) (n int, err error) { if d.state != spongeAbsorbing { panic("sha3: Write after Read") } - written = len(p) + + n = len(p) for len(p) > 0 { - if d.n == 0 && len(p) >= d.rate { - // The fast path; absorb a full "rate" bytes of input and apply the permutation. - xorIn(d, p[:d.rate]) - p = p[d.rate:] - keccakF1600(&d.a) - } else { - // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - d.n - if todo > len(p) { - todo = len(p) - } - d.n += copy(d.storage[d.n:], p[:todo]) - p = p[todo:] - - // If the sponge is full, apply the permutation. - if d.n == d.rate { - d.permute() - } + x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p) + d.n += x + p = p[x:] + + // If the sponge is full, apply the permutation. + if d.n == d.rate { + d.permute() } } @@ -156,14 +144,14 @@ func (d *state) Read(out []byte) (n int, err error) { // Now, do the squeezing. for len(out) > 0 { - n := copy(out, d.storage[d.i:d.n]) - d.i += n - out = out[n:] - // Apply the permutation if we've squeezed the sponge dry. - if d.i == d.rate { + if d.n == d.rate { d.permute() } + + x := copy(out, d.a[d.n:d.rate]) + d.n += x + out = out[x:] } return @@ -183,3 +171,74 @@ func (d *state) Sum(in []byte) []byte { dup.Read(hash) return append(in, hash...) 
} + +const ( + magicSHA3 = "sha\x08" + magicShake = "sha\x09" + magicCShake = "sha\x0a" + magicKeccak = "sha\x0b" + // magic || rate || main state || n || sponge direction + marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1 +) + +func (d *state) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) +} + +func (d *state) AppendBinary(b []byte) ([]byte, error) { + switch d.dsbyte { + case dsbyteSHA3: + b = append(b, magicSHA3...) + case dsbyteShake: + b = append(b, magicShake...) + case dsbyteCShake: + b = append(b, magicCShake...) + case dsbyteKeccak: + b = append(b, magicKeccak...) + default: + panic("unknown dsbyte") + } + // rate is at most 168, and n is at most rate. + b = append(b, byte(d.rate)) + b = append(b, d.a[:]...) + b = append(b, byte(d.n), byte(d.state)) + return b, nil +} + +func (d *state) UnmarshalBinary(b []byte) error { + if len(b) != marshaledSize { + return errors.New("sha3: invalid hash state") + } + + magic := string(b[:len(magicSHA3)]) + b = b[len(magicSHA3):] + switch { + case magic == magicSHA3 && d.dsbyte == dsbyteSHA3: + case magic == magicShake && d.dsbyte == dsbyteShake: + case magic == magicCShake && d.dsbyte == dsbyteCShake: + case magic == magicKeccak && d.dsbyte == dsbyteKeccak: + default: + return errors.New("sha3: invalid hash state identifier") + } + + rate := int(b[0]) + b = b[1:] + if rate != d.rate { + return errors.New("sha3: invalid hash state function") + } + + copy(d.a[:], b) + b = b[len(d.a):] + + n, state := int(b[0]), spongeDirection(b[1]) + if n > d.rate { + return errors.New("sha3: invalid hash state") + } + d.n = n + if state != spongeAbsorbing && state != spongeSqueezing { + return errors.New("sha3: invalid hash state") + } + d.state = state + + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index a01ef4357..a6b3a4281 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -16,9 +16,12 @@ package sha3 // [2] https://doi.org/10.6028/NIST.SP.800-185 import ( + "bytes" "encoding/binary" + "errors" "hash" "io" + "math/bits" ) // ShakeHash defines the interface to hash functions that support @@ -50,41 +53,33 @@ type cshakeState struct { initBlock []byte } -// Consts for configuring initial SHA-3 state -const ( - dsbyteShake = 0x1f - dsbyteCShake = 0x04 - rate128 = 168 - rate256 = 136 -) +func bytepad(data []byte, rate int) []byte { + out := make([]byte, 0, 9+len(data)+rate-1) + out = append(out, leftEncode(uint64(rate))...) + out = append(out, data...) + if padlen := rate - len(out)%rate; padlen < rate { + out = append(out, make([]byte, padlen)...) + } + return out +} -func bytepad(input []byte, w int) []byte { - // leftEncode always returns max 9 bytes - buf := make([]byte, 0, 9+len(input)+w) - buf = append(buf, leftEncode(uint64(w))...) - buf = append(buf, input...) - padlen := w - (len(buf) % w) - return append(buf, make([]byte, padlen)...) -} - -func leftEncode(value uint64) []byte { - var b [9]byte - binary.BigEndian.PutUint64(b[1:], value) - // Trim all but last leading zero bytes - i := byte(1) - for i < 8 && b[i] == 0 { - i++ +func leftEncode(x uint64) []byte { + // Let n be the smallest positive integer for which 2^(8n) > x. + n := (bits.Len64(x) + 7) / 8 + if n == 0 { + n = 1 } - // Prepend number of encoded bytes - b[i-1] = 9 - i - return b[i-1:] + // Return n || x with n as a byte and x as n bytes in big-endian order.
+ b := make([]byte, 9) + binary.BigEndian.PutUint64(b[1:], x) + b = b[9-n-1:] + b[0] = byte(n) + return b } func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} - - // leftEncode returns max 9 bytes - c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) c.initBlock = append(c.initBlock, N...) c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) @@ -111,6 +106,30 @@ func (c *state) Clone() ShakeHash { return c.clone() } +func (c *cshakeState) MarshalBinary() ([]byte, error) { + return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock))) +} + +func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) { + b, err := c.state.AppendBinary(b) + if err != nil { + return nil, err + } + b = append(b, c.initBlock...) + return b, nil +} + +func (c *cshakeState) UnmarshalBinary(b []byte) error { + if len(b) <= marshaledSize { + return errors.New("sha3: invalid hash state") + } + if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil { + return err + } + c.initBlock = bytes.Clone(b[marshaledSize:]) + return nil +} + // NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. @@ -126,11 +145,11 @@ func NewShake256() ShakeHash { } func newShake128Generic() *state { - return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} + return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake} } func newShake256Generic() *state { - return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} + return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake} } // NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, @@ -143,7 +162,7 @@ func NewCShake128(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake128() } - return newCShake(N, S, rate128, 32, dsbyteCShake) + return newCShake(N, S, rateK256, 32, dsbyteCShake) } // NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, @@ -156,7 +175,7 @@ func NewCShake256(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake256() } - return newCShake(N, S, rate256, 64, dsbyteCShake) + return newCShake(N, S, rateK512, 64, dsbyteCShake) } // ShakeSum128 writes an arbitrary-length digest of data into hash. diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go deleted file mode 100644 index 6ada5c957..000000000 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import ( - "crypto/subtle" - "encoding/binary" - "unsafe" - - "golang.org/x/sys/cpu" -) - -// xorIn xors the bytes in buf into the state. -func xorIn(d *state, buf []byte) { - if cpu.IsBigEndian { - for i := 0; len(buf) >= 8; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } - } else { - ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) - subtle.XORBytes(ab[:], ab[:], buf) - } -} - -// copyOut copies uint64s to a byte buffer. 
-func copyOut(d *state, b []byte) {
-	if cpu.IsBigEndian {
-		for i := 0; len(b) >= 8; i++ {
-			binary.LittleEndian.PutUint64(b, d.a[i])
-			b = b[8:]
-		}
-	} else {
-		ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a))
-		copy(b, ab[:])
-	}
-}
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index b93961010..b86dde151 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -555,6 +555,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
 	}
 
 	gotMsgExtInfo := false
+	gotUserAuthInfoRequest := false
 	for {
 		packet, err := c.readPacket()
 		if err != nil {
@@ -585,6 +586,9 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
 			if msg.PartialSuccess {
 				return authPartialSuccess, msg.Methods, nil
 			}
+			if !gotUserAuthInfoRequest {
+				return authFailure, msg.Methods, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
+			}
 			return authFailure, msg.Methods, nil
 		case msgUserAuthSuccess:
 			return authSuccess, nil, nil
@@ -596,6 +600,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
 		if err := Unmarshal(packet, &msg); err != nil {
 			return authFailure, nil, err
 		}
+		gotUserAuthInfoRequest = true
 
 		// Manually unpack the prompt/echo pairs.
 		rest := msg.Prompts
diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s
new file mode 100644
index 000000000..ec2acfe54
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && amd64 && gc
+
+#include "textflag.h"
+
+TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_sysctl(SB)
+GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
+DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
+
+TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_sysctlbyname(SB)
+GLOBL	·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8
+DATA	·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go
new file mode 100644
index 000000000..b838cb9e9
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && amd64 && gc
+
+package cpu
+
+// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl
+// call (see issue 43089). It also restricts AVX512 support for Darwin to
+// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233).
+//
+// Background:
+// Darwin implements a special mechanism to economize on thread state when
+// AVX512 specific registers are not in use. This scheme minimizes state when
+// preempting threads that haven't yet used any AVX512 instructions, but adds
+// special requirements to check for AVX512 hardware support at runtime (e.g.
+// via sysctl call or commpage inspection). See issue 43089 and link below for
+// full background:
+// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240
+//
+// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0
+// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption
+// of the AVX512 mask registers (K0-K7) upon signal return. For this reason
+// AVX512 is considered unsafe to use on Darwin for kernel versions prior to
+// 21.3.0, where a fix has been confirmed. See issue 49233 for full background.
+func darwinSupportsAVX512() bool {
+	return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0)
+}
+
+// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies
+func darwinKernelVersionCheck(major, minor, patch int) bool {
+	var release [256]byte
+	err := darwinOSRelease(&release)
+	if err != nil {
+		return false
+	}
+
+	var mmp [3]int
+	c := 0
+Loop:
+	for _, b := range release[:] {
+		switch {
+		case b >= '0' && b <= '9':
+			mmp[c] = 10*mmp[c] + int(b-'0')
+		case b == '.':
+			c++
+			if c > 2 {
+				return false
+			}
+		case b == 0:
+			break Loop
+		default:
+			return false
+		}
+	}
+	if c != 2 {
+		return false
+	}
+	return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch)
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
index 910728fb1..32a44514e 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -6,10 +6,10 @@
 
 package cpu
 
-// cpuid is implemented in cpu_x86.s for gc compiler
+// cpuid is implemented in cpu_gc_x86.s for gc compiler
 // and in cpu_gccgo.c for gccgo.
 func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
 
-// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
+// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler
 // and in cpu_gccgo.c for gccgo.
 func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s
similarity index 94%
rename from vendor/golang.org/x/sys/cpu/cpu_x86.s
rename to vendor/golang.org/x/sys/cpu/cpu_gc_x86.s
index 7d7ba33ef..ce208ce6d 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.s
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s
@@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24
 	RET
 
 // func xgetbv() (eax, edx uint32)
-TEXT ·xgetbv(SB),NOSPLIT,$0-8
+TEXT ·xgetbv(SB), NOSPLIT, $0-8
 	MOVL $0, CX
 	XGETBV
 	MOVL AX, eax+0(FP)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
index 99c60fe9f..170d21ddf 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
@@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) {
 	gccgoXgetbv(&a, &d)
 	return a, d
 }
-
-// gccgo doesn't build on Darwin, per:
-// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
-func darwinSupportsAVX512() bool {
-	return false
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
index 08f35ea17..f1caf0f78 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
@@ -110,7 +110,6 @@ func doinit() {
 	ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
 	ARM64.HasDIT = isSet(hwCap, hwcap_DIT)
 
-	// HWCAP2 feature bits
 	ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
 	ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go
new file mode 100644
index 000000000..a0fd7e2f7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc))
+
+package cpu
+
+func darwinSupportsAVX512() bool {
+	panic("only implemented for gc && amd64 && darwin")
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index c29f5e4c5..600a68078 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -92,10 +92,8 @@ func archInit() {
 	osSupportsAVX = isSet(1, eax) && isSet(2, eax)
 
 	if runtime.GOOS == "darwin" {
-		// Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
-		// Since users can't rely on mask register contents, let's not advertise AVX-512 support.
-		// See issue 49233.
-		osSupportsAVX512 = false
+		// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
+		osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
 	} else {
 		// Check if OPMASK and ZMM registers have OS support.
 		osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go
new file mode 100644
index 000000000..4d0888b0c
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go
@@ -0,0 +1,98 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Minimal copy of x/sys/unix so the cpu package can make a
+// system call on Darwin without depending on x/sys/unix.
+
+//go:build darwin && amd64 && gc
+
+package cpu
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type _C_int int32
+
+// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419
+func darwinOSRelease(release *[256]byte) error {
+	// from x/sys/unix/zerrors_openbsd_amd64.go
+	const (
+		CTL_KERN       = 0x1
+		KERN_OSRELEASE = 0x2
+	)
+
+	mib := []_C_int{CTL_KERN, KERN_OSRELEASE}
+	n := unsafe.Sizeof(*release)
+
+	return sysctl(mib, &release[0], &n, nil, 0)
+}
+
+type Errno = syscall.Errno
+
+var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes.
+
+// from x/sys/unix/zsyscall_darwin_amd64.go L791-807
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	if _, _, err := syscall_syscall6(
+		libc_sysctl_trampoline_addr,
+		uintptr(_p0),
+		uintptr(len(mib)),
+		uintptr(unsafe.Pointer(old)),
+		uintptr(unsafe.Pointer(oldlen)),
+		uintptr(unsafe.Pointer(new)),
+		uintptr(newlen),
+	); err != 0 {
+		return err
+	}
+
+	return nil
+}
+
+var libc_sysctl_trampoline_addr uintptr
+
+// adapted from internal/cpu/cpu_arm64_darwin.go
+func darwinSysctlEnabled(name []byte) bool {
+	out := int32(0)
+	nout := unsafe.Sizeof(out)
+	if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil {
+		return false
+	}
+	return out > 0
+}
+
+//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
+
+var libc_sysctlbyname_trampoline_addr uintptr
+
+// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix
+func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
+	if _, _, err := syscall_syscall6(
+		libc_sysctlbyname_trampoline_addr,
+		uintptr(unsafe.Pointer(name)),
+		uintptr(unsafe.Pointer(old)),
+		uintptr(unsafe.Pointer(oldlen)),
+		uintptr(unsafe.Pointer(new)),
+		uintptr(newlen),
+		0,
+	); err != 0 {
+		return err
+	}
+
+	return nil
+}
+
+//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
+
+// Implemented in the runtime package (runtime/sys_darwin.go)
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+
+//go:linkname syscall_syscall6 syscall.syscall6
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
index dbe680eab..7ca4fa12a 100644
--- a/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
 	return &value, err
 }
 
+// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
+// association for the network device specified by ifname.
+func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return nil, err
+	}
+
+	value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
+	ifrd := ifr.withData(unsafe.Pointer(&value))
+
+	err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+	return &value, err
+}
+
+// IoctlGetHwTstamp retrieves the hardware timestamping configuration
+// for the network device specified by ifname.
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return nil, err
+	}
+
+	value := HwTstampConfig{}
+	ifrd := ifr.withData(unsafe.Pointer(&value))
+
+	err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
+	return &value, err
+}
+
+// IoctlSetHwTstamp updates the hardware timestamping configuration for
+// the network device specified by ifname.
+func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return err
+	}
+	ifrd := ifr.withData(unsafe.Pointer(cfg))
+	return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
+}
+
+// FdToClockID derives the clock ID from the file descriptor number
+// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
+// suitable for system calls like ClockGettime.
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
+
+// IoctlPtpClockGetcaps returns the description of a given PTP device.
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
+	var value PtpClockCaps
+	err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpSysOffsetPrecise returns a description of the clock
+// offset compared to the system clock.
+func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
+	var value PtpSysOffsetPrecise
+	err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpSysOffsetExtended returns an extended description of the
+// clock offset compared to the system clock. The samples parameter
+// specifies the desired number of measurements.
+func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
+	value := PtpSysOffsetExtended{Samples: uint32(samples)}
+	err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinGetfunc returns the configuration of the specified
+// I/O pin on the given PTP device.
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
+	value := PtpPinDesc{Index: uint32(index)}
+	err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinSetfunc updates the configuration of the specified PTP
+// I/O pin.
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
+	return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
+}
+
+// IoctlPtpPeroutRequest configures the periodic output mode of the
+// PTP I/O pins.
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
+	return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
+}
+
+// IoctlPtpExttsRequest configures the external timestamping mode
+// of the PTP I/O pins.
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
+	return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
+}
+
 // IoctlGetWatchdogInfo fetches information about a watchdog device from the
 // Linux watchdog API. For more information, see:
 // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
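
A rough usage sketch for the PTP wrappers added above (not part of the patch; it assumes a PTP hardware clock exposed at /dev/ptp0 and permission to open it):

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// /dev/ptp0 is an assumption; the device node depends on the NIC and driver.
    	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer unix.Close(fd)

    	// Query the clock's capabilities via the new PTP_CLOCK_GETCAPS2 wrapper.
    	caps, err := unix.IoctlPtpClockGetcaps(fd)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("max adj: %d ppb, pins: %d\n", caps.Max_adj, caps.N_pins)

    	// Read the PHC directly through the dynamic clock ID derived from the fd.
    	var ts unix.Timespec
    	if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("PHC time: %d.%09d\n", ts.Sec, ts.Nsec)
    }

FdToClockID mirrors the kernel's FD_TO_CLOCKID macro, so the same descriptor serves both the ioctl interface and clock_gettime-style reads.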
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index ac54ecaba..6ab02b6c3 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -158,6 +158,16 @@ includes_Linux='
 #endif
 #define _GNU_SOURCE
 
+// See the description in unix/linux/types.go
+#if defined(__ARM_EABI__) || \
+    (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
+    (defined(__powerpc__) && (!defined(__powerpc64__)))
+# ifdef _TIME_BITS
+# undef _TIME_BITS
+# endif
+# define _TIME_BITS 32
+#endif
+
 // is broken on powerpc64, as it fails to include definitions of
 // these structures. We just include them copied from .
 #if defined(__powerpc__)
@@ -256,6 +266,7 @@ struct ltchars {
 #include
 #include
 #include
+#include <linux/ptp_clock.h>
 #include
 #include
 #include
@@ -527,6 +538,7 @@ ccflags="$@"
 		$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
 		$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
 		$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
+		$2 ~ /^PTP_/ ||
 		$2 ~ /^RAW_PAYLOAD_/ ||
 		$2 ~ /^[US]F_/ ||
 		$2 ~ /^TP_STATUS_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index f08abd434..230a94549 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys	ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
 //sys	ClockGetres(clockid int32, res *Timespec) (err error)
 //sys	ClockGettime(clockid int32, time *Timespec) (err error)
+//sys	ClockSettime(clockid int32, time *Timespec) (err error)
 //sys	ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	CloseRange(first uint, last uint, flags uint) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 312ae6ac1..7bf5c04bb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
 
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+	xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+	return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+	return mapper.munmap(uintptr(addr), length)
+}
+
 //sys	Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
 //sysnb	Getgid() (gid int)
 //sysnb	Getpid() (pid int)
@@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
 // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
 func isSpecialPath(path []byte) (v bool) {
 	var special = [4][8]byte{
-		[8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
-		[8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
+		{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
+		{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
 
 	var i, j int
 	for i = 0; i < len(special); i++ {
@@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
 //sys	Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT
 //sys	Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT
 //sys	Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT
+
+func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) {
+	runtime.EnterSyscall()
+	r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg)
+	runtime.ExitSyscall()
+	val = int(r0)
+	if int64(r0) == -1 {
+		err = errnoErr2(e1, e2)
+	}
+	return
+}
+
+func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) {
+	switch op.(type) {
+	case *Flock_t:
+		err = FcntlFlock(fd, cmd, op.(*Flock_t))
+		if err != nil {
+			ret = -1
+		}
+		return
+	case int:
+		return FcntlInt(fd, cmd, op.(int))
+	case *F_cnvrt:
+		return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt))))
+	case unsafe.Pointer:
+		return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer)))
+	default:
+		return -1, EINVAL
+	}
+	return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	return sendfile(outfd, infd, offset, count)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	// TODO: use LE call instead if the call is implemented
+	originalOffset, err := Seek(infd, 0, SEEK_CUR)
+	if err != nil {
+		return -1, err
+	}
+	// Start reading data from in_fd.
+	if offset != nil {
+		_, err := Seek(infd, *offset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	buf := make([]byte, count)
+	readBuf := make([]byte, 0)
+	var n int = 0
+	for i := 0; i < count; i += n {
+		n, err := Read(infd, buf)
+		if n == 0 {
+			if err != nil {
+				return -1, err
+			} else { // EOF
+				break
+			}
+		}
+		readBuf = append(readBuf, buf...)
+		buf = buf[0:0]
+	}
+
+	n2, err := Write(outfd, readBuf)
+	if err != nil {
+		return -1, err
+	}
+
+	// When sendfile() returns, this variable will be set to the
+	// offset of the byte following the last byte that was read.
+	if offset != nil {
+		*offset = *offset + int64(n)
+		// If offset is not NULL, then sendfile() does not modify the file
+		// offset of in_fd.
+		_, err := Seek(infd, originalOffset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+	return n2, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index de3b46248..ccba391c9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -2625,6 +2625,28 @@ const (
 	PR_UNALIGN_NOPRINT          = 0x1
 	PR_UNALIGN_SIGBUS           = 0x2
 	PSTOREFS_MAGIC              = 0x6165676c
+	PTP_CLK_MAGIC               = '='
+	PTP_ENABLE_FEATURE          = 0x1
+	PTP_EXTTS_EDGES             = 0x6
+	PTP_EXTTS_EVENT_VALID       = 0x1
+	PTP_EXTTS_V1_VALID_FLAGS    = 0x7
+	PTP_EXTTS_VALID_FLAGS       = 0x1f
+	PTP_EXT_OFFSET              = 0x10
+	PTP_FALLING_EDGE            = 0x4
+	PTP_MAX_SAMPLES             = 0x19
+	PTP_PEROUT_DUTY_CYCLE       = 0x2
+	PTP_PEROUT_ONE_SHOT         = 0x1
+	PTP_PEROUT_PHASE            = 0x4
+	PTP_PEROUT_V1_VALID_FLAGS   = 0x0
+	PTP_PEROUT_VALID_FLAGS      = 0x7
+	PTP_PIN_GETFUNC             = 0xc0603d06
+	PTP_PIN_GETFUNC2            = 0xc0603d0f
+	PTP_RISING_EDGE             = 0x2
+	PTP_STRICT_FLAGS            = 0x8
+	PTP_SYS_OFFSET_EXTENDED     = 0xc4c03d09
+	PTP_SYS_OFFSET_EXTENDED2    = 0xc4c03d12
+	PTP_SYS_OFFSET_PRECISE      = 0xc0403d08
+	PTP_SYS_OFFSET_PRECISE2     = 0xc0403d11
 	PTRACE_ATTACH               = 0x10
 	PTRACE_CONT                 = 0x7
 	PTRACE_DETACH               = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 8aa6d77c0..0c00cb3f3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCUNBRIDGECHAN     = 0x7434
 	PPPIOCXFERUNIT         = 0x744e
 	PR_SET_PTRACER_ANY     = 0xffffffff
+	PTP_CLOCK_GETCAPS      = 0x80503d01
+	PTP_CLOCK_GETCAPS2     = 0x80503d0a
+	PTP_ENABLE_PPS         = 0x40043d04
+	PTP_ENABLE_PPS2        = 0x40043d0d
+	PTP_EXTTS_REQUEST      = 0x40103d02
+	PTP_EXTTS_REQUEST2     = 0x40103d0b
+	PTP_MASK_CLEAR_ALL     = 0x3d13
+	PTP_MASK_EN_SINGLE     = 0x40043d14
+	PTP_PEROUT_REQUEST     = 0x40383d03
+	PTP_PEROUT_REQUEST2    = 0x40383d0c
+	PTP_PIN_SETFUNC        = 0x40603d07
+	PTP_PIN_SETFUNC2       = 0x40603d10
+	PTP_SYS_OFFSET         = 0x43403d05
+	PTP_SYS_OFFSET2        = 0x43403d0e
 	PTRACE_GETFPREGS       = 0xe
 	PTRACE_GETFPXREGS      = 0x12
 	PTRACE_GET_THREAD_AREA = 0x19
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index da428f425..dfb364554 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCUNBRIDGECHAN  = 0x7434
 	PPPIOCXFERUNIT      = 0x744e
 	PR_SET_PTRACER_ANY  = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS   = 0x80503d01
+	PTP_CLOCK_GETCAPS2  = 0x80503d0a
+	PTP_ENABLE_PPS      = 0x40043d04
+	PTP_ENABLE_PPS2     = 0x40043d0d
+	PTP_EXTTS_REQUEST   = 0x40103d02
+	PTP_EXTTS_REQUEST2  = 0x40103d0b
+	PTP_MASK_CLEAR_ALL  = 0x3d13
+	PTP_MASK_EN_SINGLE  = 0x40043d14
+	PTP_PEROUT_REQUEST  = 0x40383d03
+	PTP_PEROUT_REQUEST2 = 0x40383d0c
+	PTP_PIN_SETFUNC     = 0x40603d07
+	PTP_PIN_SETFUNC2    = 0x40603d10
+	PTP_SYS_OFFSET      = 0x43403d05
+	PTP_SYS_OFFSET2     = 0x43403d0e
 	PTRACE_ARCH_PRCTL   = 0x1e
 	PTRACE_GETFPREGS    = 0xe
 	PTRACE_GETFPXREGS   = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index bf45bfec7..d46dcf78a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN   = 0x7434
 	PPPIOCXFERUNIT       = 0x744e
 	PR_SET_PTRACER_ANY   = 0xffffffff
+	PTP_CLOCK_GETCAPS    = 0x80503d01
+	PTP_CLOCK_GETCAPS2   = 0x80503d0a
+	PTP_ENABLE_PPS       = 0x40043d04
+	PTP_ENABLE_PPS2      = 0x40043d0d
+	PTP_EXTTS_REQUEST    = 0x40103d02
+	PTP_EXTTS_REQUEST2   = 0x40103d0b
+	PTP_MASK_CLEAR_ALL   = 0x3d13
+	PTP_MASK_EN_SINGLE   = 0x40043d14
+	PTP_PEROUT_REQUEST   = 0x40383d03
+	PTP_PEROUT_REQUEST2  = 0x40383d0c
+	PTP_PIN_SETFUNC      = 0x40603d07
+	PTP_PIN_SETFUNC2     = 0x40603d10
+	PTP_SYS_OFFSET       = 0x43403d05
+	PTP_SYS_OFFSET2      = 0x43403d0e
 	PTRACE_GETCRUNCHREGS = 0x19
 	PTRACE_GETFDPIC      = 0x1f
 	PTRACE_GETFDPIC_EXEC = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 71c67162b..3af3248a7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -240,6 +240,20 @@ const (
 	PROT_BTI            = 0x10
 	PROT_MTE            = 0x20
 	PR_SET_PTRACER_ANY  = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS   = 0x80503d01
+	PTP_CLOCK_GETCAPS2  = 0x80503d0a
+	PTP_ENABLE_PPS      = 0x40043d04
+	PTP_ENABLE_PPS2     = 0x40043d0d
+	PTP_EXTTS_REQUEST   = 0x40103d02
+	PTP_EXTTS_REQUEST2  = 0x40103d0b
+	PTP_MASK_CLEAR_ALL  = 0x3d13
+	PTP_MASK_EN_SINGLE  = 0x40043d14
+	PTP_PEROUT_REQUEST  = 0x40383d03
+	PTP_PEROUT_REQUEST2 = 0x40383d0c
+	PTP_PIN_SETFUNC     = 0x40603d07
+	PTP_PIN_SETFUNC2    = 0x40603d10
+	PTP_SYS_OFFSET      = 0x43403d05
+	PTP_SYS_OFFSET2     = 0x43403d0e
 	PTRACE_PEEKMTETAGS  = 0x21
 	PTRACE_POKEMTETAGS  = 0x22
 	PTRACE_SYSEMU       = 0x1f
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9476628fa..292bcf028 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -238,6 +238,20 @@ const (
 	PPPIOCUNBRIDGECHAN       = 0x7434
 	PPPIOCXFERUNIT           = 0x744e
 	PR_SET_PTRACER_ANY       = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS        = 0x80503d01
+	PTP_CLOCK_GETCAPS2       = 0x80503d0a
+	PTP_ENABLE_PPS           = 0x40043d04
+	PTP_ENABLE_PPS2          = 0x40043d0d
+	PTP_EXTTS_REQUEST        = 0x40103d02
+	PTP_EXTTS_REQUEST2       = 0x40103d0b
+	PTP_MASK_CLEAR_ALL       = 0x3d13
+	PTP_MASK_EN_SINGLE       = 0x40043d14
+	PTP_PEROUT_REQUEST       = 0x40383d03
+	PTP_PEROUT_REQUEST2      = 0x40383d0c
+	PTP_PIN_SETFUNC          = 0x40603d07
+	PTP_PIN_SETFUNC2         = 0x40603d10
+	PTP_SYS_OFFSET           = 0x43403d05
+	PTP_SYS_OFFSET2          = 0x43403d0e
 	PTRACE_SYSEMU            = 0x1f
 	PTRACE_SYSEMU_SINGLESTEP = 0x20
 	RLIMIT_AS                = 0x9
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index b9e85f3cf..782b7110f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN          = 0x20007434
 	PPPIOCXFERUNIT              = 0x2000744e
 	PR_SET_PTRACER_ANY          = 0xffffffff
+	PTP_CLOCK_GETCAPS           = 0x40503d01
+	PTP_CLOCK_GETCAPS2          = 0x40503d0a
+	PTP_ENABLE_PPS              = 0x80043d04
+	PTP_ENABLE_PPS2             = 0x80043d0d
+	PTP_EXTTS_REQUEST           = 0x80103d02
+	PTP_EXTTS_REQUEST2          = 0x80103d0b
+	PTP_MASK_CLEAR_ALL          = 0x20003d13
+	PTP_MASK_EN_SINGLE          = 0x80043d14
+	PTP_PEROUT_REQUEST          = 0x80383d03
+	PTP_PEROUT_REQUEST2         = 0x80383d0c
+	PTP_PIN_SETFUNC             = 0x80603d07
+	PTP_PIN_SETFUNC2            = 0x80603d10
+	PTP_SYS_OFFSET              = 0x83403d05
+	PTP_SYS_OFFSET2             = 0x83403d0e
 	PTRACE_GETFPREGS            = 0xe
 	PTRACE_GET_THREAD_AREA      = 0x19
 	PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index a48b68a76..84973fd92 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN          = 0x20007434
 	PPPIOCXFERUNIT              = 0x2000744e
 	PR_SET_PTRACER_ANY          = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS           = 0x40503d01
+	PTP_CLOCK_GETCAPS2          = 0x40503d0a
+	PTP_ENABLE_PPS              = 0x80043d04
+	PTP_ENABLE_PPS2             = 0x80043d0d
+	PTP_EXTTS_REQUEST           = 0x80103d02
+	PTP_EXTTS_REQUEST2          = 0x80103d0b
+	PTP_MASK_CLEAR_ALL          = 0x20003d13
+	PTP_MASK_EN_SINGLE          = 0x80043d14
+	PTP_PEROUT_REQUEST          = 0x80383d03
+	PTP_PEROUT_REQUEST2         = 0x80383d0c
+	PTP_PIN_SETFUNC             = 0x80603d07
+	PTP_PIN_SETFUNC2            = 0x80603d10
+	PTP_SYS_OFFSET              = 0x83403d05
+	PTP_SYS_OFFSET2             = 0x83403d0e
 	PTRACE_GETFPREGS            = 0xe
 	PTRACE_GET_THREAD_AREA      = 0x19
 	PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ea00e8522..6d9cbc3b2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN          = 0x20007434
 	PPPIOCXFERUNIT              = 0x2000744e
 	PR_SET_PTRACER_ANY          = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS           = 0x40503d01
+	PTP_CLOCK_GETCAPS2          = 0x40503d0a
+	PTP_ENABLE_PPS              = 0x80043d04
+	PTP_ENABLE_PPS2             = 0x80043d0d
+	PTP_EXTTS_REQUEST           = 0x80103d02
+	PTP_EXTTS_REQUEST2          = 0x80103d0b
+	PTP_MASK_CLEAR_ALL          = 0x20003d13
+	PTP_MASK_EN_SINGLE          = 0x80043d14
+	PTP_PEROUT_REQUEST          = 0x80383d03
+	PTP_PEROUT_REQUEST2         = 0x80383d0c
+	PTP_PIN_SETFUNC             = 0x80603d07
+	PTP_PIN_SETFUNC2            = 0x80603d10
+	PTP_SYS_OFFSET              = 0x83403d05
+	PTP_SYS_OFFSET2             = 0x83403d0e
 	PTRACE_GETFPREGS            = 0xe
 	PTRACE_GET_THREAD_AREA      = 0x19
 	PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 91c646871..5f9fedbce 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN          = 0x20007434
 	PPPIOCXFERUNIT              = 0x2000744e
 	PR_SET_PTRACER_ANY          = 0xffffffff
+	PTP_CLOCK_GETCAPS           = 0x40503d01
+	PTP_CLOCK_GETCAPS2          = 0x40503d0a
+	PTP_ENABLE_PPS              = 0x80043d04
+	PTP_ENABLE_PPS2             = 0x80043d0d
+	PTP_EXTTS_REQUEST           = 0x80103d02
+	PTP_EXTTS_REQUEST2          = 0x80103d0b
+	PTP_MASK_CLEAR_ALL          = 0x20003d13
+	PTP_MASK_EN_SINGLE          = 0x80043d14
+	PTP_PEROUT_REQUEST          = 0x80383d03
+	PTP_PEROUT_REQUEST2         = 0x80383d0c
+	PTP_PIN_SETFUNC             = 0x80603d07
+	PTP_PIN_SETFUNC2            = 0x80603d10
+	PTP_SYS_OFFSET              = 0x83403d05
+	PTP_SYS_OFFSET2             = 0x83403d0e
 	PTRACE_GETFPREGS            = 0xe
 	PTRACE_GET_THREAD_AREA      = 0x19
 	PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 8cbf38d63..bb0026ee0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT      = 0x2000744e
 	PROT_SAO            = 0x10
 	PR_SET_PTRACER_ANY  = 0xffffffff
+	PTP_CLOCK_GETCAPS   = 0x40503d01
+	PTP_CLOCK_GETCAPS2  = 0x40503d0a
+	PTP_ENABLE_PPS      = 0x80043d04
+	PTP_ENABLE_PPS2     = 0x80043d0d
+	PTP_EXTTS_REQUEST   = 0x80103d02
+	PTP_EXTTS_REQUEST2  = 0x80103d0b
+	PTP_MASK_CLEAR_ALL  = 0x20003d13
+	PTP_MASK_EN_SINGLE  = 0x80043d14
+	PTP_PEROUT_REQUEST  = 0x80383d03
+	PTP_PEROUT_REQUEST2 = 0x80383d0c
+	PTP_PIN_SETFUNC     = 0x80603d07
+	PTP_PIN_SETFUNC2    = 0x80603d10
+	PTP_SYS_OFFSET      = 0x83403d05
+	PTP_SYS_OFFSET2     = 0x83403d0e
 	PTRACE_GETEVRREGS   = 0x14
 	PTRACE_GETFPREGS    = 0xe
 	PTRACE_GETREGS64    = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index a2df73419..46120db5c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT      = 0x2000744e
 	PROT_SAO            = 0x10
 	PR_SET_PTRACER_ANY  = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS   = 0x40503d01
+	PTP_CLOCK_GETCAPS2  = 0x40503d0a
+	PTP_ENABLE_PPS      = 0x80043d04
+	PTP_ENABLE_PPS2     = 0x80043d0d
+	PTP_EXTTS_REQUEST   = 0x80103d02
+	PTP_EXTTS_REQUEST2  = 0x80103d0b
+	PTP_MASK_CLEAR_ALL  = 0x20003d13
+	PTP_MASK_EN_SINGLE  = 0x80043d14
+	PTP_PEROUT_REQUEST  = 0x80383d03
+	PTP_PEROUT_REQUEST2 = 0x80383d0c
+	PTP_PIN_SETFUNC     = 0x80603d07
+	PTP_PIN_SETFUNC2    = 0x80603d10
+	PTP_SYS_OFFSET      = 0x83403d05
+	PTP_SYS_OFFSET2     = 0x83403d0e
 	PTRACE_GETEVRREGS   = 0x14
 	PTRACE_GETFPREGS    = 0xe
 	PTRACE_GETREGS64    = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 247913792..5c951634f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT      = 0x2000744e
 	PROT_SAO            = 0x10
 	PR_SET_PTRACER_ANY  = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS   = 0x40503d01
+	PTP_CLOCK_GETCAPS2  = 0x40503d0a
+	PTP_ENABLE_PPS      = 0x80043d04
+	PTP_ENABLE_PPS2     = 0x80043d0d
+	PTP_EXTTS_REQUEST   = 0x80103d02
+	PTP_EXTTS_REQUEST2  = 0x80103d0b
+	PTP_MASK_CLEAR_ALL  = 0x20003d13
+	PTP_MASK_EN_SINGLE  = 0x80043d14
+	PTP_PEROUT_REQUEST  = 0x80383d03
+	PTP_PEROUT_REQUEST2 = 0x80383d0c
+	PTP_PIN_SETFUNC     = 0x80603d07
+	PTP_PIN_SETFUNC2    = 0x80603d10
+	PTP_SYS_OFFSET      = 0x83403d05
+	PTP_SYS_OFFSET2     = 0x83403d0e
 	PTRACE_GETEVRREGS   = 0x14
 	PTRACE_GETFPREGS    = 0xe
 	PTRACE_GETREGS64    = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index d265f146e..11a84d5af 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN     = 0x7434
 	PPPIOCXFERUNIT         = 0x744e
 	PR_SET_PTRACER_ANY     = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS      = 0x80503d01
+	PTP_CLOCK_GETCAPS2     = 0x80503d0a
+	PTP_ENABLE_PPS         = 0x40043d04
+	PTP_ENABLE_PPS2        = 0x40043d0d
+	PTP_EXTTS_REQUEST      = 0x40103d02
+	PTP_EXTTS_REQUEST2     = 0x40103d0b
+	PTP_MASK_CLEAR_ALL     = 0x3d13
+	PTP_MASK_EN_SINGLE     = 0x40043d14
+	PTP_PEROUT_REQUEST     = 0x40383d03
+	PTP_PEROUT_REQUEST2    = 0x40383d0c
+	PTP_PIN_SETFUNC        = 0x40603d07
+	PTP_PIN_SETFUNC2       = 0x40603d10
+	PTP_SYS_OFFSET         = 0x43403d05
+	PTP_SYS_OFFSET2        = 0x43403d0e
 	PTRACE_GETFDPIC        = 0x21
 	PTRACE_GETFDPIC_EXEC   = 0x0
 	PTRACE_GETFDPIC_INTERP = 0x1
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 3f2d64439..f78c4617c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN    = 0x7434
 	PPPIOCXFERUNIT        = 0x744e
 	PR_SET_PTRACER_ANY    = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS     = 0x80503d01
+	PTP_CLOCK_GETCAPS2    = 0x80503d0a
+	PTP_ENABLE_PPS        = 0x40043d04
+	PTP_ENABLE_PPS2       = 0x40043d0d
+	PTP_EXTTS_REQUEST     = 0x40103d02
+	PTP_EXTTS_REQUEST2    = 0x40103d0b
+	PTP_MASK_CLEAR_ALL    = 0x3d13
+	PTP_MASK_EN_SINGLE    = 0x40043d14
+	PTP_PEROUT_REQUEST    = 0x40383d03
+	PTP_PEROUT_REQUEST2   = 0x40383d0c
+	PTP_PIN_SETFUNC       = 0x40603d07
+	PTP_PIN_SETFUNC2      = 0x40603d10
+	PTP_SYS_OFFSET        = 0x43403d05
+	PTP_SYS_OFFSET2       = 0x43403d0e
 	PTRACE_DISABLE_TE     = 0x5010
 	PTRACE_ENABLE_TE      = 0x5009
 	PTRACE_GET_LAST_BREAK = 0x5006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 5d8b727a1..aeb777c34 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -239,6 +239,20 @@ const (
 	PPPIOCUNBRIDGECHAN  = 0x20007434
 	PPPIOCXFERUNIT      = 0x2000744e
 	PR_SET_PTRACER_ANY  = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS   = 0x40503d01
+	PTP_CLOCK_GETCAPS2  = 0x40503d0a
+	PTP_ENABLE_PPS      = 0x80043d04
+	PTP_ENABLE_PPS2     = 0x80043d0d
+	PTP_EXTTS_REQUEST   = 0x80103d02
+	PTP_EXTTS_REQUEST2  = 0x80103d0b
+	PTP_MASK_CLEAR_ALL  = 0x20003d13
+	PTP_MASK_EN_SINGLE  = 0x80043d14
+	PTP_PEROUT_REQUEST  = 0x80383d03
+	PTP_PEROUT_REQUEST2 = 0x80383d0c
+	PTP_PIN_SETFUNC     = 0x80603d07
+	PTP_PIN_SETFUNC2    = 0x80603d10
+	PTP_SYS_OFFSET      = 0x83403d05
+	PTP_SYS_OFFSET2     = 0x83403d0e
 	PTRACE_GETFPAREGS   = 0x14
 	PTRACE_GETFPREGS    = 0xe
 	PTRACE_GETFPREGS64  = 0x19
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index af30da557..5cc1e8eb2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ClockSettime(clockid int32, time *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
 	_, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 3a69e4549..8daaf3faf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -1752,12 +1752,6 @@ const (
 	IFLA_IPVLAN_UNSPEC        = 0x0
 	IFLA_IPVLAN_MODE          = 0x1
 	IFLA_IPVLAN_FLAGS         = 0x2
-	NETKIT_NEXT               = -0x1
-	NETKIT_PASS               = 0x0
-	NETKIT_DROP               = 0x2
-	NETKIT_REDIRECT           = 0x7
-	NETKIT_L2                 = 0x0
-	NETKIT_L3                 = 0x1
 	IFLA_NETKIT_UNSPEC        = 0x0
 	IFLA_NETKIT_PEER_INFO     = 0x1
 	IFLA_NETKIT_PRIMARY       = 0x2
@@ -1796,6 +1790,7 @@ const (
 	IFLA_VXLAN_DF             = 0x1d
 	IFLA_VXLAN_VNIFILTER      = 0x1e
 	IFLA_VXLAN_LOCALBYPASS    = 0x1f
+	IFLA_VXLAN_LABEL_POLICY   = 0x20
 	IFLA_GENEVE_UNSPEC        = 0x0
 	IFLA_GENEVE_ID            = 0x1
 	IFLA_GENEVE_REMOTE        = 0x2
@@ -1825,6 +1820,8 @@ const (
 	IFLA_GTP_ROLE             = 0x4
 	IFLA_GTP_CREATE_SOCKETS   = 0x5
 	IFLA_GTP_RESTART_COUNT    = 0x6
+	IFLA_GTP_LOCAL            = 0x7
+	IFLA_GTP_LOCAL6           = 0x8
 	IFLA_BOND_UNSPEC          = 0x0
 	IFLA_BOND_MODE            = 0x1
 	IFLA_BOND_ACTIVE_SLAVE    = 0x2
@@ -1857,6 +1854,7 @@ const (
 	IFLA_BOND_AD_LACP_ACTIVE  = 0x1d
 	IFLA_BOND_MISSED_MAX      = 0x1e
 	IFLA_BOND_NS_IP6_TARGET   = 0x1f
+	IFLA_BOND_COUPLED_CONTROL = 0x20
 	IFLA_BOND_AD_INFO_UNSPEC  = 0x0
 	IFLA_BOND_AD_INFO_AGGREGATOR = 0x1
 	IFLA_BOND_AD_INFO_NUM_PORTS  = 0x2
@@ -1925,6 +1923,7 @@ const (
 	IFLA_HSR_SEQ_NR           = 0x5
 	IFLA_HSR_VERSION          = 0x6
 	IFLA_HSR_PROTOCOL         = 0x7
+	IFLA_HSR_INTERLINK        = 0x8
 	IFLA_STATS_UNSPEC         = 0x0
 	IFLA_STATS_LINK_64        = 0x1
 	IFLA_STATS_LINK_XSTATS    = 0x2
@@ -1977,6 +1976,15 @@ const (
 	IFLA_DSA_MASTER = 0x1
 )
 
+const (
+	NETKIT_NEXT     = -0x1
+	NETKIT_PASS     = 0x0
+	NETKIT_DROP     = 0x2
+	NETKIT_REDIRECT = 0x7
+	NETKIT_L2       = 0x0
+	NETKIT_L3       = 0x1
+)
+
 const (
 	NF_INET_PRE_ROUTING  = 0x0
 	NF_INET_LOCAL_IN     = 0x1
@@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct {
 	Regdump_len uint32
 }
 
+type EthtoolTsInfo struct {
+	Cmd             uint32
+	So_timestamping uint32
+	Phc_index       int32
+	Tx_types        uint32
+	Tx_reserved     [3]uint32
+	Rx_filters      uint32
+	Rx_reserved     [3]uint32
+}
+
+type HwTstampConfig struct {
+	Flags     int32
+	Tx_type   int32
+	Rx_filter int32
+}
+
+const (
+	HWTSTAMP_FILTER_NONE            = 0x0
+	HWTSTAMP_FILTER_ALL             = 0x1
+	HWTSTAMP_FILTER_SOME            = 0x2
+	HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3
+	HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6
+	HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9
+	HWTSTAMP_FILTER_PTP_V2_EVENT    = 0xc
+)
+
+const (
+	HWTSTAMP_TX_OFF          = 0x0
+	HWTSTAMP_TX_ON           = 0x1
+	HWTSTAMP_TX_ONESTEP_SYNC = 0x2
+)
+
+type (
+	PtpClockCaps struct {
+		Max_adj            int32
+		N_alarm            int32
+		N_ext_ts           int32
+		N_per_out          int32
+		Pps                int32
+		N_pins             int32
+		Cross_timestamping int32
+		Adjust_phase       int32
+		Max_phase_adj      int32
+		Rsv                [11]int32
+	}
+	PtpClockTime struct {
+		Sec      int64
+		Nsec     uint32
+		Reserved uint32
+	}
+	PtpExttsEvent struct {
+		T     PtpClockTime
+		Index uint32
+		Flags uint32
+		Rsv   [2]uint32
+	}
+	PtpExttsRequest struct {
+		Index uint32
+		Flags uint32
+		Rsv   [2]uint32
+	}
+	PtpPeroutRequest struct {
+		StartOrPhase PtpClockTime
+		Period       PtpClockTime
+		Index        uint32
+		Flags        uint32
+		On           PtpClockTime
+	}
+	PtpPinDesc struct {
+		Name  [64]byte
+		Index uint32
+		Func  uint32
+		Chan  uint32
+		Rsv   [5]uint32
+	}
+	PtpSysOffset struct {
+		Samples uint32
+		Rsv     [3]uint32
+		Ts      [51]PtpClockTime
+	}
+	PtpSysOffsetExtended struct {
+		Samples uint32
+		Rsv     [3]uint32
+		Ts      [25][3]PtpClockTime
+	}
+	PtpSysOffsetPrecise struct {
+		Device   PtpClockTime
+		Realtime PtpClockTime
+		Monoraw  PtpClockTime
+		Rsv      [4]uint32
+	}
+)
+
+const (
+	PTP_PF_NONE    = 0x0
+	PTP_PF_EXTTS   = 0x1
+	PTP_PF_PEROUT  = 0x2
+	PTP_PF_PHYSYNC = 0x3
+)
+
 type (
 	HIDRawReportDescriptor struct {
 		Size uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
index d9a13af46..2e5d5a443 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
@@ -377,6 +377,12 @@ type Flock_t struct {
 	Pid    int32
 }
 
+type F_cnvrt struct {
+	Cvtcmd int32
+	Pccsid int16
+	Fccsid int16
+}
+
 type Termios struct {
 	Cflag uint32
 	Iflag uint32
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 5cee9a314..4510bfc3f 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration {
 }
 
 func Ftruncate(fd Handle, length int64) (err error) {
-	curoffset, e := Seek(fd, 0, 1)
-	if e != nil {
-		return e
-	}
-	defer Seek(fd, curoffset, 0)
-	_, e = Seek(fd, length, 0)
-	if e != nil {
-		return e
+	type _FILE_END_OF_FILE_INFO struct {
+		EndOfFile int64
 	}
-	e = SetEndOfFile(fd)
-	if e != nil {
-		return e
-	}
-	return nil
+	var info _FILE_END_OF_FILE_INFO
+	info.EndOfFile = length
+	return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
 }
 
 func Gettimeofday(tv *Timeval) (err error) {
@@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0))
 //sys	GetACP() (acp uint32) = kernel32.GetACP
 //sys	MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
 //sys	getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
+//sys	GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys	GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys	NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys	NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
+//sys	CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
 
 // For testing: clients can set this flag to force
 // creation of IPv6 sockets to return EAFNOSUPPORT.
@@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string {
 // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for
 // the more common *uint16 string type.
 func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
-	var u NTUnicodeString
-	s16, err := UTF16PtrFromString(s)
+	s16, err := UTF16FromString(s)
 	if err != nil {
 		return nil, err
 	}
-	RtlInitUnicodeString(&u, s16)
-	return &u, nil
+	n := uint16(len(s16) * 2)
+	return &NTUnicodeString{
+		Length:        n - 2, // subtract 2 bytes for the NULL terminator
+		MaximumLength: n,
+		Buffer:        &s16[0],
+	}, nil
 }
 
 // Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 7b97a154c..51311e205 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -2203,6 +2203,132 @@ const (
 	IfOperStatusLowerLayerDown = 7
 )
 
+const (
+	IF_MAX_PHYS_ADDRESS_LENGTH = 32
+	IF_MAX_STRING_SIZE         = 256
+)
+
+// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex.
+const (
+	MibIfEntryNormal                  = 0
+	MibIfEntryNormalWithoutStatistics = 2
+)
+
+// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type.
+const (
+	MibParameterNotification = 0
+	MibAddInstance           = 1
+	MibDeleteInstance        = 2
+	MibInitialNotification   = 3
+)
+
+// MibIfRow2 stores information about a particular interface. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2.
+type MibIfRow2 struct {
+	InterfaceLuid               uint64
+	InterfaceIndex              uint32
+	InterfaceGuid               GUID
+	Alias                       [IF_MAX_STRING_SIZE + 1]uint16
+	Description                 [IF_MAX_STRING_SIZE + 1]uint16
+	PhysicalAddressLength       uint32
+	PhysicalAddress             [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	PermanentPhysicalAddress    [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	Mtu                         uint32
+	Type                        uint32
+	TunnelType                  uint32
+	MediaType                   uint32
+	PhysicalMediumType          uint32
+	AccessType                  uint32
+	DirectionType               uint32
+	InterfaceAndOperStatusFlags uint8
+	OperStatus                  uint32
+	AdminStatus                 uint32
+	MediaConnectState           uint32
+	NetworkGuid                 GUID
+	ConnectionType              uint32
+	TransmitLinkSpeed           uint64
+	ReceiveLinkSpeed            uint64
+	InOctets                    uint64
+	InUcastPkts                 uint64
+	InNUcastPkts                uint64
+	InDiscards                  uint64
+	InErrors                    uint64
+	InUnknownProtos             uint64
+	InUcastOctets               uint64
+	InMulticastOctets           uint64
+	InBroadcastOctets           uint64
+	OutOctets                   uint64
+	OutUcastPkts                uint64
+	OutNUcastPkts               uint64
+	OutDiscards                 uint64
+	OutErrors                   uint64
+	OutUcastOctets              uint64
+	OutMulticastOctets          uint64
+	OutBroadcastOctets          uint64
+	OutQLen                     uint64
+}
+
+// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
+type MibUnicastIpAddressRow struct {
+	Address            RawSockaddrInet6 // SOCKADDR_INET union
+	InterfaceLuid      uint64
+	InterfaceIndex     uint32
+	PrefixOrigin       uint32
+	SuffixOrigin       uint32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	OnLinkPrefixLength uint8
+	SkipAsSource       uint8
+	DadState           uint32
+	ScopeId            uint32
+	CreationTimeStamp  Filetime
+}
+
+const ScopeLevelCount = 16
+
+// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface.
+// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row.
+type MibIpInterfaceRow struct {
+	Family                               uint16
+	InterfaceLuid                        uint64
+	InterfaceIndex                       uint32
+	MaxReassemblySize                    uint32
+	InterfaceIdentifier                  uint64
+	MinRouterAdvertisementInterval       uint32
+	MaxRouterAdvertisementInterval       uint32
+	AdvertisingEnabled                   uint8
+	ForwardingEnabled                    uint8
+	WeakHostSend                         uint8
+	WeakHostReceive                      uint8
+	UseAutomaticMetric                   uint8
+	UseNeighborUnreachabilityDetection   uint8
+	ManagedAddressConfigurationSupported uint8
+	OtherStatefulConfigurationSupported  uint8
+	AdvertiseDefaultRoute                uint8
+	RouterDiscoveryBehavior              uint32
+	DadTransmits                         uint32
+	BaseReachableTime                    uint32
+	RetransmitTime                       uint32
+	PathMtuDiscoveryTimeout              uint32
+	LinkLocalAddressBehavior             uint32
+	LinkLocalAddressTimeout              uint32
+	ZoneIndices                          [ScopeLevelCount]uint32
+	SitePrefixLength                     uint32
+	Metric                               uint32
+	NlMtu                                uint32
+	Connected                            uint8
+	SupportsWakeUpPatterns               uint8
+	SupportsNeighborDiscovery            uint8
+	SupportsRouterDiscovery              uint8
+	ReachableTime                        uint32
+	TransmitOffload                      uint32
+	ReceiveOffload                       uint32
+	DisableDefaultRoutes                 uint8
+}
+
 // Console related constants used for the mode parameter to SetConsoleMode. See
 // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
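
A minimal sketch of how the new iphlpapi bindings and the MibIfRow2 type above might be exercised (not part of the patch; the interface index is a placeholder that real code would obtain from GetAdaptersAddresses):

    //go:build windows

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	// Index 1 is a placeholder; discover real indices via GetAdaptersAddresses.
    	row := windows.MibIfRow2{InterfaceIndex: 1}
    	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormalWithoutStatistics, &row); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("alias:", windows.UTF16ToString(row.Alias[:]))
    	fmt.Println("oper status:", row.OperStatus)
    }

Passing MibIfEntryNormalWithoutStatistics skips collecting the packet and octet counters, which is cheaper when only status fields are needed.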
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 4c2e1bdc0..6f5252880 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -181,10 +181,15 @@ var (
 	procDnsRecordListFree            = moddnsapi.NewProc("DnsRecordListFree")
 	procDwmGetWindowAttribute        = moddwmapi.NewProc("DwmGetWindowAttribute")
 	procDwmSetWindowAttribute        = moddwmapi.NewProc("DwmSetWindowAttribute")
+	procCancelMibChangeNotify2       = modiphlpapi.NewProc("CancelMibChangeNotify2")
 	procGetAdaptersAddresses         = modiphlpapi.NewProc("GetAdaptersAddresses")
 	procGetAdaptersInfo              = modiphlpapi.NewProc("GetAdaptersInfo")
 	procGetBestInterfaceEx           = modiphlpapi.NewProc("GetBestInterfaceEx")
 	procGetIfEntry                   = modiphlpapi.NewProc("GetIfEntry")
+	procGetIfEntry2Ex                = modiphlpapi.NewProc("GetIfEntry2Ex")
+	procGetUnicastIpAddressEntry     = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
+	procNotifyIpInterfaceChange      = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+	procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
 	procAddDllDirectory              = modkernel32.NewProc("AddDllDirectory")
 	procAssignProcessToJobObject     = modkernel32.NewProc("AssignProcessToJobObject")
 	procCancelIo                     = modkernel32.NewProc("CancelIo")
@@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
 	return
 }
 
+func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
 func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
 	r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
 	if r0 != 0 {
@@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
 	return
 }
 
+func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
 func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
 	r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
 	cookie = uintptr(r0)
diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md
index d03d0aefe..05ff623f9 100644
--- a/vendor/golang.org/x/term/README.md
+++ b/vendor/golang.org/x/term/README.md
@@ -4,16 +4,13 @@
 
 This repository provides Go terminal and console support packages.
 
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/term`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/term`.
-
 ## Report Issues / Send Patches
 
 This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
+this repository, see https://go.dev/doc/contribute.
+
+The git repository is https://go.googlesource.com/term.
 
 The main issue tracker for the term repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the
+https://go.dev/issues. Prefix your issue with "x/term:" in the
 subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/time/LICENSE
+++ b/vendor/golang.org/x/time/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index a6b508188..6ad1b1c1d 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC
+// Copyright 2024 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.9
+// 	protoc        v4.24.4
 // source: google/rpc/status.proto
 
 package status
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 6a8a07781..5d4096d46 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,28 @@ for general contribution guidelines.
 
 ## Maintainers (in alphabetical order)
 
+- [aranjans](https://github.com/aranjans), Google LLC
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
 - [atollena](https://github.com/atollena), Datadog, Inc.
-- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ab0fbb79b..b572707c6 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. ## Installation diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e10870..abab27937 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50..d7b40b7cb 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
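(Editor's note on the DefaultConfig hunk above: callers who want non-default connection backoff pass a `backoff.Config` through `grpc.WithConnectParams`. A minimal sketch using the public grpc-go API; the target address is a placeholder.)

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from the documented defaults and cap retry delays at 30s.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: bc}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```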
// // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f7..b181f386a 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -243,6 +257,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +428,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8..2b87bd79c 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. 
} -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 87% rename from vendor/google.golang.org/grpc/pickfirst.go rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 885362661..4d69b4052 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,38 +16,48 @@ * */ -package grpc +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } func (pickfirstBuilder) Name() string { - return PickFirstBalancerName + return Name } type pfConfig struct { @@ -93,6 +103,12 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by @@ -124,7 +140,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -139,13 +155,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. 
The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad22..260255d31 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index af39b8a4c..8ad6ce2f0 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,12 +25,15 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. 
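(Editor's note: the pick_first hunk above shuffles a *copy* of the resolver's list — `append` into an empty slice, then `rand.Shuffle` — so the caller's slice is never mutated. A self-contained sketch of that copy-then-shuffle idiom, using `math/rand` as the hunk itself does.)

```go
package main

import (
	"fmt"
	"math/rand"
)

// shuffled returns a shuffled copy of addrs, leaving the input untouched,
// mirroring how the balancer copies before calling rand.Shuffle.
func shuffled(addrs []string) []string {
	out := append([]string{}, addrs...) // copy so the resolver's slice is not mutated
	rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
	return out
}

func main() {
	in := []string{"a:443", "b:443", "c:443"}
	fmt.Println(shuffled(in), in) // in keeps its original order
}
```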
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { + uccs := func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err - }) - if !ok { - return nil } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the MaybeSchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -198,6 +208,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.cc.mu.Lock() defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } ccb.mu.Lock() if ccb.closed { @@ -248,15 +262,29 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. 
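(Editor's note: the comment in the hunk above explains why updateClientConnState needs `ScheduleOr` rather than best-effort `TrySchedule` — the caller blocks on `errCh`, so a failed schedule must still close the channel. A simplified sketch of that pattern; the serializer here is a stand-in, not grpc's internal grpcsync type.)

```go
package main

import "fmt"

// serializer is a stand-in for a callback serializer that may already be closed.
type serializer struct{ closed bool }

// scheduleOr runs onFailure when the task cannot be scheduled, so a caller
// blocking on a channel the task would close is never left hanging.
func (s *serializer) scheduleOr(task func(), onFailure func()) {
	if s.closed {
		onFailure()
		return
	}
	go task()
}

func main() {
	s := &serializer{closed: true} // simulate racing with Close
	errCh := make(chan error, 1)
	s.scheduleOr(
		func() { errCh <- nil; close(errCh) },
		func() { close(errCh) }, // guarantees the receive below returns
	)
	fmt.Println(<-errCh) // <nil>: unblocked even though scheduling failed
}
```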
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } @@ -314,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 1afb1e84a..55bffaa77 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + 
file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 2359f94b8..9c8850e3f 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -31,6 +32,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" @@ -38,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -72,6 +75,8 @@ var ( // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -152,6 +157,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) for _, opt := range opts { opt.apply(&cc.dopts) } + + // Determine the resolver to use. 
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) @@ -160,7 +175,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -168,30 +183,24 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.mkp = cc.dopts.copts.KeepaliveParams - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) + if err = cc.initAuthority(); err != nil { return nil, err } + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. + cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -586,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -622,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. 
A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -641,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -692,8 +692,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - balancer.Register(pickfirstBuilder{}) - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -809,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } + copy(out, in) return out } @@ -832,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -915,28 +908,29 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.mu.Unlock() - ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true +// equalAddressIgnoringBalAttributes returns true is a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. 
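(Editor's note: `equalAddressesIgnoringBalAttributes` above leans on the stdlib `slices.EqualFunc` to compare two lists element-wise with a custom predicate. A self-contained illustration of the same helper, with a simplified address type standing in for `resolver.Address`.)

```go
package main

import (
	"fmt"
	"slices"
)

type addr struct {
	Addr          string
	ServerName    string
	BalancerAttrs string // ignored by the comparison, like BalancerAttributes
}

func main() {
	a := []addr{{Addr: "10.0.0.1:443", ServerName: "s1", BalancerAttrs: "x"}}
	b := []addr{{Addr: "10.0.0.1:443", ServerName: "s1", BalancerAttrs: "y"}}
	equal := slices.EqualFunc(a, b, func(x, y addr) bool {
		return x.Addr == y.Addr && x.ServerName == y.ServerName
	})
	fmt.Println(equal) // true: balancer attributes are not considered
}
```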
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -944,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -963,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -989,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1187,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1201,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1211,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1228,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. 
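(Editor's note: `resetTransportAndUnlock` documents an ownership transfer — the caller acquires `ac.mu` and the callee guarantees release on every return path. A minimal sketch of that convention; all names here are illustrative.)

```go
package main

import (
	"fmt"
	"sync"
)

type conn struct {
	mu    sync.Mutex
	state string
}

// resetAndUnlock must be called with c.mu held; it releases the lock on
// every return path, mirroring the resetTransportAndUnlock contract.
func (c *conn) resetAndUnlock() {
	if c.state == "shutdown" {
		c.mu.Unlock()
		return
	}
	c.state = "connecting"
	c.mu.Unlock()
	// ... start the (potentially slow) connection attempt without holding mu.
}

func main() {
	c := &conn{state: "idle"}
	c.mu.Lock()
	c.resetAndUnlock()
	fmt.Println(c.state) // connecting
}
```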
+func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1519,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan + t, state, sc := ac.transport, ac.state, ac.stateReadyChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1582,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. + // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport @@ -1673,22 +1664,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1707,15 +1695,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { defScheme = resolver.GetDefaultScheme() } - channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1805,7 +1790,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. 
One was through the transport credentials @@ -1838,6 +1823,5 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd4..e840858b7 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the register in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). Initially checks the V2 registry with encoding.GetCodecV2 and +// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry +// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge +// to turn it into an encoding.CodecV2. Returns nil otherwise. +func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443..4c805c644 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. 
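(Editor's note: the codec.go hunk above adapts older Codec implementations to the new CodecV2 shape with small bridge types. A self-contained sketch of that adapter pattern, using simplified interfaces rather than grpc's mem-based ones.)

```go
package main

import "fmt"

// v1 is the old flat-[]byte interface; v2 returns a slice of buffers,
// with bufferSlice standing in for mem.BufferSlice.
type v1 interface{ Marshal(v any) ([]byte, error) }
type bufferSlice [][]byte
type v2 interface{ Marshal(v any) (bufferSlice, error) }

// v1Bridge adapts a v1 codec to the v2 interface, like codecV1Bridge.
type v1Bridge struct{ c v1 }

func (b v1Bridge) Marshal(v any) (bufferSlice, error) {
	data, err := b.c.Marshal(v)
	if err != nil {
		return nil, err
	}
	return bufferSlice{data}, nil // wrap the flat payload as one buffer
}

type stringCodec struct{}

func (stringCodec) Marshal(v any) ([]byte, error) { return []byte(fmt.Sprint(v)), nil }

func main() {
	var c v2 = v1Bridge{c: stringCodec{}}
	out, _ := c.Marshal("hello")
	fmt.Printf("%q\n", out) // ["hello"]: one buffer holding the payload
}
```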
type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34ed..411435854 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. 
TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } tlsInfo := TLSInfo{ - State: conn.ConnectionState(), + State: cs, CommonAuthInfo: CommonAuthInfo{ SecurityLevel: PrivacyAndIntegrity, }, diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 00273702b..2b285beee 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -21,6 +21,7 @@ package grpc import ( "context" "net" + "net/url" "time" "google.golang.org/grpc/backoff" @@ -32,10 +33,16 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { globalDialOptions = append(globalDialOptions, opt...) @@ -43,10 +50,18 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -78,8 +93,8 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -89,6 +104,19 @@ type DialOption interface { var globalDialOptions []DialOption +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -490,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. 
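(Editor's note: the tls.go changes above close connections whose handshake negotiated no ALPN protocol — a requirement for HTTP/2 over TLS — unless the env escape hatch is set. A hedged sketch of the same check against a raw crypto/tls connection; the host is a placeholder.)

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// checkALPN mirrors the enforcement above: an empty NegotiatedProtocol means
// the peer did not negotiate ALPN, which HTTP/2 over TLS requires.
func checkALPN(conn *tls.Conn) error {
	if np := conn.ConnectionState().NegotiatedProtocol; np == "" {
		conn.Close()
		return fmt.Errorf("missing selected ALPN property")
	}
	return nil
}

func main() {
	// Advertise only h2, as the gRPC client does during its handshake.
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{NextProtos: []string{"h2"}})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Println(checkALPN(conn))
}
```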
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -649,12 +679,13 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } @@ -712,25 +743,25 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. // -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) +// A value of 5 will be used if this dial option is not set or n < 2. +func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) } -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad..e7b532b6f 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d71..11d0ae142 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. 
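(Editor's note: `WithKeepaliveParams` and the new `WithMaxCallAttempts` shown above are both plain dial options. A sketch of combined use; the target is a placeholder, and per the hunk above, attempt values below 2 fall back to the default of 5.)

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", // placeholder
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Keepalive is disabled by default; this enables a ping every 30s.
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second,
			Timeout: 10 * time.Second,
		}),
		grpc.WithMaxCallAttempts(3), // caps retries plus hedges per call
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```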
func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 000000000..074c5e234 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
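(Editor's note: GetCodec and the new GetCodecV2 above share one `map[string]any` and rely on type assertions, so a name resolves only through the getter matching its registered generation. A small self-contained sketch of that single-registry pattern.)

```go
package main

import "fmt"

// The two generations have incompatible Marshal signatures, so a value
// satisfies at most one interface, just like Codec vs. CodecV2.
type codecV1 interface{ Marshal(v any) ([]byte, error) }
type codecV2 interface{ Marshal(v any) ([][]byte, error) }

// One registry holds both generations; lookups type-assert, so a miss or a
// same-name entry of the other generation yields nil.
var registered = make(map[string]any)

func getV1(name string) codecV1 { c, _ := registered[name].(codecV1); return c }
func getV2(name string) codecV2 { c, _ := registered[name].(codecV2); return c }

type jsonV2 struct{}

func (jsonV2) Marshal(v any) ([][]byte, error) { return [][]byte{[]byte(fmt.Sprint(v))}, nil }

func main() {
	registered["json"] = jsonV2{}
	fmt.Println(getV2("json") != nil, getV1("json") == nil) // true true
}
```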
+func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03..ceec319dd 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 000000000..1d827dd5d --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to + // metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for a int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. 
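(Editor's note: the registry above hands back typed handles that are just pointers to the stored descriptor, so recording needs no map lookup at the call site. A simplified, self-contained sketch of that typed-handle-over-descriptor pattern; the types stand in for grpc's.)

```go
package main

import "fmt"

type descriptor struct {
	Name string
	Unit string
}

// counterHandle is a typed view of a descriptor, like Int64CountHandle.
type counterHandle descriptor

func (h *counterHandle) Descriptor() *descriptor { return (*descriptor)(h) }

var registry = make(map[string]*descriptor)

// registerCounter stores the descriptor once and returns a typed handle
// pointing at the same allocation.
func registerCounter(d descriptor) *counterHandle {
	p := &d
	registry[d.Name] = p
	return (*counterHandle)(p)
}

func main() {
	h := registerCounter(descriptor{Name: "grpc.test.calls", Unit: "call"})
	// The handle and the registry entry are the same object.
	fmt.Println(h.Descriptor() == registry["grpc.test.calls"]) // true
}
```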
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+	return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+	if registeredMetrics[name] {
+		logger.Fatalf("metric %v already registered", name)
+	}
+	registeredMetrics[name] = true
+	if def {
+		DefaultMetrics = DefaultMetrics.Add(name)
+	}
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	registeredMetrics = make(map[Metric]bool)
+	metricsRegistry = make(map[Metric]*MetricDescriptor)
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 000000000..3221f7a63
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+	// RecordInt64Count records the measurement alongside labels on the int
+	// count associated with the provided handle.
+	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+	// RecordFloat64Count records the measurement alongside labels on the float
+	// count associated with the provided handle.
+	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+	// RecordInt64Histo records the measurement alongside labels on the int
+	// histo associated with the provided handle.
+	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+	// RecordFloat64Histo records the measurement alongside labels on the float
+	// histo associated with the provided handle.
+	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+	// RecordInt64Gauge records the measurement alongside labels on the int
+	// gauge associated with the provided handle.
+	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+	return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	maps.Copy(newMetrics, m.metrics)
+	maps.Copy(newMetrics, metrics.metrics)
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
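A short sketch of the copy-on-write semantics above: every operation leaves its receiver untouched and returns a fresh Metrics, which is what makes the shared DefaultMetrics safe to read after initialization. The metric names here are hypothetical placeholders.

```go
package example

import (
	"fmt"

	estats "google.golang.org/grpc/experimental/stats"
)

func metricsSetDemo() {
	base := estats.NewMetrics("example.calls.started")
	more := base.Add("example.calls.failed")    // base itself is unchanged
	both := base.Join(more)                     // union of the two sets
	less := both.Remove("example.calls.failed") // both itself is unchanged

	fmt.Println(len(base.Metrics()), len(more.Metrics()), len(both.Metrics()), len(less.Metrics()))
	// Prints: 1 2 2 1
}
```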
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		delete(newMetrics, metric)
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced..f1ae080dc 100644
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
 
 import (
 	"fmt"
-
-	"google.golang.org/grpc/internal/grpclog"
 )
 
 // componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
 
 func (c *componentData) InfoDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.InfoDepth(depth+1, args...)
+	InfoDepth(depth+1, args...)
 }
 
 func (c *componentData) WarningDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.WarningDepth(depth+1, args...)
+	WarningDepth(depth+1, args...)
 }
 
 func (c *componentData) ErrorDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.ErrorDepth(depth+1, args...)
+	ErrorDepth(depth+1, args...)
 }
 
 func (c *componentData) FatalDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.FatalDepth(depth+1, args...)
+	FatalDepth(depth+1, args...)
 }
 
 func (c *componentData) Info(args ...any) {
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16928c9cb..db320105e 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,18 +18,15 @@
 
 // Package grpclog defines logging for grpc.
 //
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
 
 import (
 	"os"
 
-	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/grpclog/internal"
 )
 
 func init() {
@@ -38,58 +35,58 @@ func init() {
 
 // V reports whether verbosity level l is at least the requested verbose level.
 func V(l int) bool {
-	return grpclog.Logger.V(l)
+	return internal.LoggerV2Impl.V(l)
 }
 
 // Info logs to the INFO log.
 func Info(args ...any) {
-	grpclog.Logger.Info(args...)
+	internal.LoggerV2Impl.Info(args...)
 }
 
 // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
 func Infof(format string, args ...any) {
-	grpclog.Logger.Infof(format, args...)
+	internal.LoggerV2Impl.Infof(format, args...)
 }
 
 // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
 func Infoln(args ...any) {
-	grpclog.Logger.Infoln(args...)
+	internal.LoggerV2Impl.Infoln(args...)
 }
 
 // Warning logs to the WARNING log.
 func Warning(args ...any) {
-	grpclog.Logger.Warning(args...)
+	internal.LoggerV2Impl.Warning(args...)
 }
 
 // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
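The component.go hunk above only redirects componentData from the internal depth helpers to the ones now exported by this package; callers see no behavioral change. For reference, a component logger is used roughly like this (the component name and verbosity threshold are illustrative):

```go
package example

import "google.golang.org/grpc/grpclog"

// Component returns a cached logger that prefixes every record with the
// component name, e.g. "[example]"; its depth-aware methods keep the
// reported file/line pointing at the caller.
var logger = grpclog.Component("example")

func doWork(target string) {
	if logger.V(2) { // gated by GRPC_GO_LOG_VERBOSITY_LEVEL
		logger.Infof("processing %q", target)
	}
	logger.Warning("this always logs at WARNING severity")
}
```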
func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) 
+	} else {
+		internal.LoggerV2Impl.Errorln(args...)
+	}
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Fatalln(args...)
+	}
+	os.Exit(1)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 000000000..59c03bc14
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 000000000..e524fdd40
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+	Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
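LoggerWrapper recreates, inside grpclog/internal, the adapter that logger.go previously kept private (its deletion appears further below). The observable behavior is unchanged: any value satisfying the deprecated Logger interface, including *log.Logger from the standard library, can still be installed, with every severity funneled through its Print-style methods. A minimal sketch using only the public API:

```go
package example

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// *log.Logger provides Print/Printf/Println and Fatal/Fatalf/Fatalln,
	// so it satisfies the deprecated grpclog.Logger interface directly;
	// grpclog wraps it in a LoggerWrapper. New code should prefer
	// SetLoggerV2, which keeps severity information.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}
```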
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
similarity index 52%
rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index bfc45102a..07df71e98 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2020 gRPC authors.
+ * Copyright 2024 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,59 +16,17 @@
  *
  */
 
-// Package grpclog (internal) defines depth logging for grpc.
-package grpclog
+package internal
 
 import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
 	"os"
 )
 
-// Logger is the logger used for the non-depth log functions.
-var Logger LoggerV2
-
-// DepthLogger is the logger used for the depth log functions.
-var DepthLogger DepthLoggerV2
-
-// InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...any) {
-	if DepthLogger != nil {
-		DepthLogger.InfoDepth(depth, args...)
-	} else {
-		Logger.Infoln(args...)
-	}
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...any) {
-	if DepthLogger != nil {
-		DepthLogger.WarningDepth(depth, args...)
-	} else {
-		Logger.Warningln(args...)
-	}
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...any) {
-	if DepthLogger != nil {
-		DepthLogger.ErrorDepth(depth, args...)
-	} else {
-		Logger.Errorln(args...)
-	}
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...any) {
-	if DepthLogger != nil {
-		DepthLogger.FatalDepth(depth, args...)
-	} else {
-		Logger.Fatalln(args...)
-	}
-	os.Exit(1)
-}
-
 // LoggerV2 does underlying logging work for grpclog.
-// This is a copy of the LoggerV2 defined in the external grpclog package. It
-// is defined here to avoid a circular dependency.
 type LoggerV2 interface {
 	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
 	Info(args ...any)
@@ -107,14 +65,13 @@
 
 // DepthLoggerV2 logs at a specified call frame.
If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. 
+// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d826..4b2035857 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d71..892dc13d1 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. 
Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
- m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. - b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011a..b15cf482d 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,10 +25,10 @@ package backoff import ( "context" "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a87..966932891 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b089..64c791953 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e150..078bb8123 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. 
 	EntriesPerPage = 50
 
 	curState int32
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
index d1ed8df6a..0e6e18e18 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -35,13 +35,13 @@ type SocketOptionData struct {
 
 // Getsockopt defines the function to get socket options requested by channelz.
 // It is to be passed to syscall.RawConn.Control().
 // Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
+func (s *SocketOptionData) Getsockopt(uintptr) {
 	once.Do(func() {
 		logger.Warning("Channelz: socket options are not supported on non-linux environments")
 	})
 }
 
 // GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c any) *SocketOptionData {
+func GetSocketOption(any) *SocketOptionData {
 	return nil
 }
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 9c915d9e4..452985f8d 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -40,6 +40,16 @@ var (
 	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
 	// handshakes that can be performed.
 	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+	// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
+	// should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
+	// option is present for backward compatibility. This option may be overridden
+	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
+	// or "false".
+	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+	// XDSFallbackSupport is the env variable that controls whether support for
+	// xDS fallback is turned on. If this is unset or is false, only the first
+	// xDS server in the list of server configs will be used.
+	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
 )
 
 func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7f7044e17..7617be215 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -18,11 +18,11 @@
 package internal
 
 var (
-	// WithRecvBufferPool is implemented by the grpc package and returns a dial
+	// WithBufferPool is implemented by the grpc package and returns a dial
 	// option to configure a shared buffer pool for a grpc.ClientConn.
-	WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+	WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
 
-	// RecvBufferPool is implemented by the grpc package and returns a server
+	// BufferPool is implemented by the grpc package and returns a server
 	// option to configure a shared buffer pool for a grpc.Server.
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de7..092ad187a 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. 
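After this hunk, PrefixLogger is a thin layer over the public grpclog.DepthLoggerV2, and V consults the wrapped logger instead of the global one. The pattern is small enough to sketch independently; the type below is an illustration of the same idea, not the vendored implementation:

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
)

// prefixLogger prepends a fixed prefix and logs at depth 1 so that the
// record is attributed to the caller rather than to this wrapper.
type prefixLogger struct {
	logger grpclog.DepthLoggerV2
	prefix string
}

func (pl *prefixLogger) Infof(format string, args ...any) {
	pl.logger.InfoDepth(1, fmt.Sprintf(pl.prefix+format, args...))
}

// V defers to the wrapped logger's verbosity setting.
func (pl *prefixLogger) V(l int) bool {
	return pl.logger.V(l)
}
```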
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b51..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af1..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import "math/rand"
-
-// This implementation will be used for Go version 1.21 or newer.
-// For older versions, the original implementation with mutex will be used.
-
-// Int implements rand.Int on the grpcrand global source.
-func Int() int {
-	return rand.Int()
-}
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
-	return rand.Int63n(n)
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
-	return rand.Intn(n)
-}
-
-// Int31n implements rand.Int31n on the grpcrand global source.
-func Int31n(n int32) int32 {
-	return rand.Int31n(n)
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
-	return rand.Float64()
-}
-
-// Uint64 implements rand.Uint64 on the grpcrand global source.
-func Uint64() uint64 {
-	return rand.Uint64()
-}
-
-// Uint32 implements rand.Uint32 on the grpcrand global source.
-func Uint32() uint32 {
-	return rand.Uint32()
-}
-
-// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
-func ExpFloat64() float64 {
-	return rand.ExpFloat64()
-}
-
-// Shuffle implements rand.Shuffle on the grpcrand global source.
-var Shuffle = func(n int, f func(int, int)) {
-	rand.Shuffle(n, f)
-}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16a..19b9d6392 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
 	return cs
 }
 
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
 //
 // Callbacks are expected to honor the context when performing any blocking
 // operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+	cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
 //
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1a..6d8c2f518 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 48d24bdb4..7aae9240f 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -174,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -184,25 +193,45 @@ var ( ChannelzTurnOffForTesting func() - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. 
- TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - - // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client - // singleton to invoke resource not found for a resource type name and - // resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) - // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. - UserSetDefaultScheme bool = false + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. + UserSetDefaultScheme = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. +// HealthChecker defines the signature of the client-side LB channel health +// checking function. 
// // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index f3f52a59a..4552db16b 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "strconv" @@ -35,7 +36,6 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -63,6 +63,8 @@ var ( func init() { resolver.Register(NewBuilder()) internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until internal.NewNetResolver = newNetResolver internal.AddressDialer = addressDialer } @@ -209,12 +211,12 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = MinResolutionInterval + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): return @@ -223,13 +225,13 @@ func (d *dnsResolver) watcher() { } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): return - case <-internal.TimeAfterFunc(waitTime): + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } @@ -423,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index a7ecaf8d5..c0eae4f5f 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -51,11 +51,22 @@ var ( // The following vars are overridden from tests. var ( // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test + // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is // blocked waiting for the duration to elapse. TimeAfterFunc func(time.Duration) <-chan time.Time + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. 
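The TimeNowFunc and TimeUntilFunc hooks declared here exist so tests can drive the watcher's re-resolution timing deterministically. A hypothetical test-side sketch (compilable only inside grpc-go itself, since the package is internal; fakeNow and wakeCh are invented names):

    fakeNow := time.Unix(100, 0)
    wakeCh := make(chan time.Time, 1)
    internal.TimeNowFunc = func() time.Time { return fakeNow }
    internal.TimeUntilFunc = func(t time.Time) time.Duration { return t.Sub(fakeNow) }
    internal.TimeAfterFunc = func(time.Duration) <-chan time.Time { return wakeCh }

    // Advance the fake clock past MinResolutionInterval, then wake the watcher.
    fakeNow = fakeNow.Add(31 * time.Second)
    wakeCh <- fakeNow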
In non-test code, this is implemented by
+	// time.Until. In test code, this can be used to control the remaining
+	// time for the resolver to wait for re-resolution.
+	TimeUntilFunc func(time.Time) time.Duration
+
 	// NewNetResolver returns the net.Resolver instance for the given target.
 	NewNetResolver func(string) (NetResolver, error)
 
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572..b901c7bac 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
 	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
 }
 
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
 
 func (*passthroughResolver) Close() {}
 
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 000000000..fd33af51a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+	// TelemetryLabels are the telemetry labels to record.
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// could also append
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 000000000..be110d41f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+	"fmt"
+
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics if the number of label values provided in a record call does not
+// match the number of label keys in the metric's descriptor.
+type MetricsRecorderList struct {
+	// metricsRecorders are the metrics recorders this list will forward to.
+	metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all of the
+// provided stats handlers that implement the MetricsRecorder interface. If
+// none of the provided stats handlers implement the MetricsRecorder
+// interface, the returned MetricsRecorderList is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+	var mrs []estats.MetricsRecorder
+	for _, sh := range shs {
+		if mr, ok := sh.(estats.MetricsRecorder); ok {
+			mrs = append(mrs, mr)
+		}
+	}
+	return &MetricsRecorderList{
+		metricsRecorders: mrs,
+	}
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+	if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+		panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Count(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Count(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Histo(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index c7dbc8205..757925381 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
 	// s.Code() != OK implies that s.Proto() != nil.
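A short usage sketch for the metrics recorder list added above; statsHandlers and attemptsHandle are hypothetical, and istats stands for an import of this internal stats package:

    // Built once per channel/server; only handlers that also implement
    // estats.MetricsRecorder participate in the fan-out.
    mrl := istats.NewMetricsRecorderList(statsHandlers)

    // The number of label values must equal len(Labels)+len(OptionalLabels)
    // of the handle's descriptor, or verifyLabels panics.
    mrl.RecordInt64Count(attemptsHandle, 1, "grpc.method", "grpc.target")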
p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd7..54c24c2ff 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7f..7e7aaa546 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a89..d5c1085ee 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. 
Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a..ef72fbb3a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. 
func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. 
+func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. 
 	delete(l.estdStreams, c.streamID)
 	str.deleteSelf()
+	for head := str.itl.dequeueAll(); head != nil; head = head.next {
+		if df, ok := head.it.(*dataFrame); ok {
+			_ = df.reader.Close()
+		}
+	}
 	}
 	if c.rst { // If RST_STREAM needs to be sent.
 		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
@@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
 	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
 	// A data item is represented by a dataFrame, since it later translates into
 	// multiple HTTP2 data frames.
-	// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
-	// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
-	// maximum possible HTTP2 frame size.
+	// Every dataFrame has two buffers; h holds the grpc-message header, and
+	// reader yields the actual message data. As an optimization to keep wire
+	// traffic low, bytes are copied from the reader into h to fill frames up
+	// to the maximum possible HTTP2 frame size.
 
-	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+	if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
 		// Client sends out empty data frame with endStream = true
 		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
 			return false, err
 		}
 		str.itl.dequeue() // remove the empty data item from stream
+		_ = dataItem.reader.Close()
 		if str.itl.isEmpty() {
 			str.state = empty
 		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -927,9 +957,7 @@
 		}
 		return false, nil
 	}
-	var (
-		buf []byte
-	)
+
 	// Figure out the maximum size we can send
 	maxSize := http2MaxFrameLen
 	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
@@ -943,43 +971,50 @@
 	}
 	// Compute how much of the header and data we can send within quota and max frame length
 	hSize := min(maxSize, len(dataItem.h))
-	dSize := min(maxSize-hSize, len(dataItem.d))
-	if hSize != 0 {
-		if dSize == 0 {
-			buf = dataItem.h
-		} else {
-			// We can add some data to grpc message header to distribute bytes more equally across frames.
-			// Copy on the stack to avoid generating garbage
-			var localBuf [http2MaxFrameLen]byte
-			copy(localBuf[:hSize], dataItem.h)
-			copy(localBuf[hSize:], dataItem.d[:dSize])
-			buf = localBuf[:hSize+dSize]
-		}
+	dSize := min(maxSize-hSize, dataItem.reader.Remaining())
+	remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+	size := hSize + dSize
+
+	var buf *[]byte
+
+	if hSize != 0 && dSize == 0 {
+		buf = &dataItem.h
 	} else {
-		buf = dataItem.d
-	}
+		// Note: this is only necessary because the http2.Framer does not support
+		// partially writing a frame, so the sequence must be materialized into a buffer.
+		// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+		pool := l.bufferPool
+		if pool == nil {
+			// Note that this is only supposed to be nil in tests. Otherwise, stream is
+			// always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29..ce878693b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
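The serverHandlerTransport changed in the next hunks is what backs the public (*grpc.Server).ServeHTTP path, typically used to multiplex gRPC with other HTTP/2 traffic on one listener. A sketch of that setup; the routing predicate and file names are illustrative:

    gs := grpc.NewServer()
    srv := &http.Server{
        Addr: ":8443",
        Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
                gs.ServeHTTP(w, r) // handled via NewServerHandlerTransport
                return
            }
            // ... non-gRPC HTTP handling ...
        }),
    }
    // The handler transport requires HTTP/2, hence TLS here (or h2c).
    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))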
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c7069..c769deab5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. 
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
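The minTime helper removed above became redundant once the codebase moved to Go 1.21+, whose builtin min is generic over ordered types such as time.Duration; the change just below switches the keepalive loop to it. A one-line illustration of the builtin:

    d := min(10*time.Second, 250*time.Millisecond) // builtin min (Go 1.21+); d == 250ms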
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index cab0e2d3d..584b50fe5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -38,12 +39,12 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. 
+ pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: @@ -1440,7 +1455,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd4..3613d7b64 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. 
- n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa10325..54b224436 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade..fdd6fa86c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. 
Since b.err is not nil, any subsequent reads
+		// will always return an error, making this buffer inaccessible.
+		r.buffer.Free()
 		b.mu.Unlock()
 		// An error had occurred earlier, don't accept more
 		// data or errors.
@@ -148,45 +129,97 @@ type recvBufferReader struct {
 	ctx     context.Context
 	ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
 	recv    *recvBuffer
-	last    *bytes.Buffer // Stores the remaining data in the previous calls.
+	last    mem.Buffer // Stores the remaining data in the previous calls.
 	err     error
-	freeBuffer func(*bytes.Buffer)
 }
 
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
 	if r.err != nil {
 		return 0, r.err
 	}
 	if r.last != nil {
-		// Read remaining data left in last call.
-		copied, _ := r.last.Read(p)
-		if r.last.Len() == 0 {
-			r.freeBuffer(r.last)
+		n, r.last = mem.ReadUnsafe(header, r.last)
+		return n, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readHeaderClient(header)
+	} else {
+		n, r.err = r.readHeader(header)
+	}
+	return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.last != nil {
+		buf = r.last
+		if r.last.Len() > n {
+			buf, r.last = mem.SplitUnsafe(buf, n)
+		} else {
 			r.last = nil
 		}
-		return copied, nil
+		return buf, nil
 	}
 	if r.closeStream != nil {
-		n, r.err = r.readClient(p)
+		buf, r.err = r.readClient(n)
 	} else {
-		n, r.err = r.read(p)
+		buf, r.err = r.read(n)
 	}
-	return n, r.err
+	return buf, r.err
 }
 
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
 	select {
 	case <-r.ctxDone:
 		return 0, ContextErr(r.ctx.Err())
 	case m := <-r.recv.get():
-		return r.readAdditional(m, p)
+		return r.readHeaderAdditional(m, header)
+	}
+}
+
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+	select {
+	case <-r.ctxDone:
+		return nil, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+	// If the context is canceled, then closes the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect.
What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
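The Stream.Read defined on the next lines no longer fills a caller-supplied []byte; it returns a mem.BufferSlice of reference-counted buffers. A consumer-side sketch assuming the public mem package semantics (msgLen is hypothetical):

    data, err := s.Read(msgLen) // data is a mem.BufferSlice
    if err != nil {
        return err
    }
    defer data.Free()         // drops one reference on every buffer in the slice
    msg := data.Materialize() // flatten only when a contiguous []byte is needed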
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err + } + t.windowHandler(len(header)) + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err } - t.windowHandler(n) - return + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7..eb42b19fb 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 000000000..c37c58c02 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. 
+	Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+	256,
+	4 << 10,  // 4KB (go page size)
+	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+	32 << 10, // 32KB (default buffer size for io.Copy)
+	1 << 20,  // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+	defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+	internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+		defaultBufferPool = pool
+	}
+
+	internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+		bufferPoolingThreshold = threshold
+	}
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewTieredBufferPool that uses a set of default sizes optimized
+// for expected workflows.
+func DefaultBufferPool() BufferPool {
+	return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+	sort.Ints(poolSizes)
+	pools := make([]*sizedBufferPool, len(poolSizes))
+	for i, s := range poolSizes {
+		pools[i] = newSizedBufferPool(s)
+	}
+	return &tieredBufferPool{
+		sizedPools: pools,
+	}
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+	sizedPools   []*sizedBufferPool
+	fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+	return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+	p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+	poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+		return p.sizedPools[i].defaultSize >= size
+	})
+
+	if poolIdx == len(p.sizedPools) {
+		return &p.fallbackPool
+	}
+
+	return p.sizedPools[poolIdx]
+}
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16kb and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16kb. Note, however, that it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+	pool        sync.Pool
+	defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+	buf := p.pool.Get().(*[]byte)
+	b := *buf
+	clear(b[:cap(b)])
+	*buf = b[:size]
+	return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+	if cap(*buf) < p.defaultSize {
+		// Ignore buffers that are too small to fit in the pool. Otherwise, when
+		// Get is called it will panic as it tries to index outside the bounds
+		// of the buffer.
+		return
+	}
+	p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+	return &sizedBufferPool{
+		pool: sync.Pool{
+			New: func() any {
+				buf := make([]byte, size)
+				return &buf
+			},
+		},
+		defaultSize: size,
+	}
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool.
When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 000000000..228e9c2f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. +func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. 
Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+	off := 0
+	for _, b := range s {
+		off += copy(dst[off:], b.ReadOnlyData())
+	}
+	return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+	l := s.Len()
+	if l == 0 {
+		return nil
+	}
+	out := make([]byte, l)
+	s.CopyTo(out)
+	return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+	if len(s) == 1 {
+		s[0].Ref()
+		return s[0]
+	}
+	sLen := s.Len()
+	if sLen == 0 {
+		return emptyBuffer{}
+	}
+	buf := pool.Get(sLen)
+	s.CopyTo(*buf)
+	return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+	s.Ref()
+	return &sliceReader{
+		data: s,
+		len:  s.Len(),
+	}
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+	// Close frees the underlying BufferSlice and never returns an error. Subsequent
+	// calls to Read will return (0, io.EOF).
+	Close() error
+	// Remaining returns the number of unread bytes remaining in the slice.
+	Remaining() int
+}
+
+type sliceReader struct {
+	data BufferSlice
+	len  int
+	// The index into data[0].ReadOnlyData().
+	bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+	return r.len
+}
+
+func (r *sliceReader) Close() error {
+	r.data.Free()
+	r.data = nil
+	r.len = 0
+	return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+		return false
+	}
+
+	r.data[0].Free()
+	r.data = r.data[1:]
+	r.bufferIdx = 0
+	return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	for len(buf) != 0 && r.len != 0 {
+		// Copy as much as possible from the first Buffer in the slice into the
+		// given byte slice.
+		data := r.data[0].ReadOnlyData()
+		copied := copy(buf, data[r.bufferIdx:])
+		r.len -= copied       // Reduce len by the number of bytes copied.
+		r.bufferIdx += copied // Increment the buffer index.
+		n += copied           // Increment the total number of bytes read.
+		buf = buf[copied:]    // Shrink the given byte slice.
+
+		// If we have copied all the data from the first Buffer, free it and advance to
+		// the next in the slice.
+		r.freeFirstBufferIfEmpty()
+	}
+
+	return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	// There may be any number of empty buffers in the slice, clear them all until a
+	// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+ for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 000000000..4d66b2ccc --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. 
+ Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. +func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. 
+func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd6..d2e15253b 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 56e8aba78..bdaa2130e 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -22,7 +22,7 @@ import ( "context" "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -33,35 +33,43 @@ import ( "google.golang.org/grpc/status" ) +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced. + picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. + blockingCh chan struct{} +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. 
type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] statsHandlers []stats.Handler // to record blocking picker calls } func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), + pw := &pickerWrapper{ statsHandlers: statsHandlers, } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() - return - } - pw.picker = p - // pw.blockingCh should never be nil. - close(pw.blockingCh) - pw.blockingCh = make(chan struct{}) - pw.mu.Unlock() + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } // doneChannelzWrapper performs the following: @@ -98,20 +106,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var lastPickErr error for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() + pg := pw.pickerGen.Load() + if pg == nil { return nil, balancer.PickResult{}, ErrClientConnClosing } - - if pw.picker == nil { - ch = pw.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == pw.blockingCh { + if ch == pg.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() + // - we have already called pick on the current picker. select { case <-ctx.Done(): var errStr string @@ -145,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } } - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() + ch = pg.blockingCh + p := pg.picker pickResult, err := p.Pick(info) if err != nil { @@ -197,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) } // reset clears the pickerWrapper and prepares it for being used again when idle // mode is exited. func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd63364..e87a17f36 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. 
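The PreparedMsg hunk below is one of the first consumers of the new mem package introduced above: encoded and compressed payloads become mem.BufferSlice values instead of raw []byte. As a quick orientation, the following is a minimal sketch of how the reference-counted buffers behave. It uses only identifiers defined earlier in this diff (mem.DefaultBufferPool, mem.Copy, mem.BufferSlice and its Reader); the payload string and the printed values are illustrative, not taken from the gRPC sources.

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Copy pulls a []byte from the pool and wraps it in a reference-counted
	// Buffer. Payloads at or below the pooling threshold (1 KiB in this diff)
	// come back as a plain SliceBuffer, whose Ref/Free are no-ops.
	buf := mem.Copy([]byte("illustrative payload"), pool)

	// A BufferSlice aggregates one or more Buffers. Its Len method sums the
	// buffer lengths; the builtin len counts the buffers themselves.
	data := mem.BufferSlice{buf}
	fmt.Println(data.Len(), len(data)) // 20 1

	// Reader takes its own references up front, so the slice can be freed
	// while the reader is still draining it.
	r := data.Reader()
	data.Free()

	head := make([]byte, 12)
	n, _ := r.Read(head)
	fmt.Println(n, string(head[:n]), r.Remaining()) // 12 illustrative 8
	r.Close() // releases the remaining references
}

Freeing the slice and closing the reader are what hand pooled backing arrays back to the pool; skipping either is memory-safe, but the arrays are then simply garbage-collected rather than reused.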
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c..000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) 
- -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. 
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 9dcc9780f..23bb3fb25 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -171,12 +171,15 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // ParseServiceConfig is called by resolver implementations to parse a JSON // representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) } // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. 
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e9..2d96f1405 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. 
The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
 // Using compressor, decompress d, returning data and size.
 // Optionally, if data will be over maxReceiveMessageSize, just return the size.
-func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
-	dcReader, err := compressor.Decompress(bytes.NewReader(d))
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
+	dcReader, err := compressor.Decompress(d.Reader())
 	if err != nil {
 		return nil, 0, err
 	}
-	if sizer, ok := compressor.(interface {
-		DecompressedSize(compressedBytes []byte) int
-	}); ok {
-		if size := sizer.DecompressedSize(d); size >= 0 {
-			if size > maxReceiveMessageSize {
-				return nil, size, nil
-			}
-			// size is used as an estimate to size the buffer, but we
-			// will read more data if available.
-			// +MinRead so ReadFrom will not reallocate if size is correct.
-			//
-			// TODO: If we ensure that the buffer size is the same as the DecompressedSize,
-			// we can also utilize the recv buffer pool here.
-			buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
-			bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
-			return buf.Bytes(), int(bytesRead), err
-		}
+
+	// TODO: Can/should this still be preserved with the new BufferSlice API? Are
+	// there any actual benefits to allocating a single large buffer instead of
+	// multiple smaller ones?
+	//if sizer, ok := compressor.(interface {
+	//	DecompressedSize(compressedBytes []byte) int
+	//}); ok {
+	//	if size := sizer.DecompressedSize(d); size >= 0 {
+	//		if size > maxReceiveMessageSize {
+	//			return nil, size, nil
+	//		}
+	//		// size is used as an estimate to size the buffer, but we
+	//		// will read more data if available.
+	//		// +MinRead so ReadFrom will not reallocate if size is correct.
+	//		//
+	//		// TODO: If we ensure that the buffer size is the same as the DecompressedSize,
+	//		// we can also utilize the recv buffer pool here.
+	//		buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+	//		bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+	//		return buf.Bytes(), int(bytesRead), err
+	//	}
+	//}
+
+	var out mem.BufferSlice
+	_, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+	if err != nil {
+		out.Free()
+		return nil, 0, err
 	}
-	// Read from LimitReader with limit max+1. So if the underlying
-	// reader is over limit, the result will be bigger than max.
-	d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
-	return d, len(d), err
+	return out, out.Len(), nil
 }
 
 // For the two compressor parameters, both should not be set, but if they are,
 // dc takes precedence over compressor.
 // TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
-	buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+	data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
 	if err != nil {
 		return err
 	}
-	defer cancel()
 
-	if err := c.Unmarshal(buf, m); err != nil {
+	// If the codec wants its own reference to the data, it can get it. Otherwise, always
+	// free the buffers.
+	defer data.Free()
+
+	if err := c.Unmarshal(data, m); err != nil {
 		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
 	}
+
 	return nil
 }
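For context: recvAndDecompress now hands its caller ownership of a reference-counted mem.BufferSlice (note the extra out.Ref() taken when payInfo captures the buffers). A rough sketch of that lifecycle, assuming only the mem APIs visible in the hunks above (NewBuffer, Ref, Len, Materialize, Free):

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/mem"
    )

    func main() {
    	// Wrap a plain []byte in a single-buffer BufferSlice, as the legacy
    	// Decompressor path above does; a nil pool means nothing is recycled.
    	b := []byte("hello")
    	bs := mem.BufferSlice{mem.NewBuffer(&b, nil)}

    	bs.Ref() // take a second reference, e.g. for payInfo.uncompressedBytes

    	fmt.Println(bs.Len())                 // 5: total length across all buffers
    	fmt.Println(string(bs.Materialize())) // flattens into one contiguous copy

    	bs.Free() // drop the captured reference
    	bs.Free() // drop the original reference; buffers return to their pool
    }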
@@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error {
 			// encoding.Codec (Name vs. String method name).  We only support
 			// setting content subtype from encoding.Codec to avoid a behavior
 			// change with the deprecated version.
-			if ec, ok := c.codec.(encoding.Codec); ok {
+			if ec, ok := c.codec.(encoding.CodecV2); ok {
 				c.contentSubtype = strings.ToLower(ec.Name())
 			}
 		}
@@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error {
 
 	if c.contentSubtype == "" {
 		// No codec specified in CallOptions; use proto by default.
-		c.codec = encoding.GetCodec(proto.Name)
+		c.codec = getCodec(proto.Name)
 		return nil
 	}
 
 	// c.contentSubtype is already lowercased in CallContentSubtype
-	c.codec = encoding.GetCodec(c.contentSubtype)
+	c.codec = getCodec(c.contentSubtype)
 	if c.codec == nil {
 		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
 	}
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 89f8e4792..d1e1415a4 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -45,6 +45,7 @@ import (
 	"google.golang.org/grpc/internal/grpcutil"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -80,7 +81,7 @@ func init() {
 	}
 	internal.BinaryLogger = binaryLogger
 	internal.JoinServerOptions = newJoinServerOption
-	internal.RecvBufferPool = recvBufferPool
+	internal.BufferPool = bufferPool
 }
 
 var statusOK = status.New(codes.OK, "")
@@ -170,7 +171,7 @@ type serverOptions struct {
 	maxHeaderListSize *uint32
 	headerTableSize *uint32
 	numServerWorkers uint32
-	recvBufferPool SharedBufferPool
+	bufferPool mem.BufferPool
 	waitForHandlers bool
 }
 
@@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{
 	connectionTimeout: 120 * time.Second,
 	writeBufferSize: defaultWriteBufSize,
 	readBufferSize: defaultReadBufSize,
-	recvBufferPool: nopBufferPool{},
+	bufferPool: mem.DefaultBufferPool(),
 }
 var globalServerOptions []ServerOption
@@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
 // Will be supported throughout 1.x.
 func CustomCodec(codec Codec) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.codec = codec
+		o.codec = newCodecV0Bridge(codec)
 	})
 }
 
@@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption {
 // later release.
 func ForceServerCodec(codec encoding.Codec) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.codec = codec
+		o.codec = newCodecV1Bridge(codec)
+	})
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codecV2
 	})
 }
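For context: a minimal sketch of what a codec registered through the new ForceServerCodecV2 option could look like, assuming the encoding.CodecV2 interface is Name/Marshal/Unmarshal over mem.BufferSlice as the hunks here suggest (the JSON codec itself is hypothetical, not part of this update):

    package main

    import (
    	"encoding/json"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/mem"
    )

    type jsonCodecV2 struct{}

    func (jsonCodecV2) Name() string { return "json" }

    func (jsonCodecV2) Marshal(v any) (mem.BufferSlice, error) {
    	b, err := json.Marshal(v)
    	if err != nil {
    		return nil, err
    	}
    	// Single-buffer slice; a real codec could draw buffers from a pool.
    	return mem.BufferSlice{mem.NewBuffer(&b, nil)}, nil
    }

    func (jsonCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
    	// Materialize flattens the (possibly multi-buffer) payload into one []byte.
    	return json.Unmarshal(data.Materialize(), v)
    }

    func newServer() *grpc.Server {
    	return grpc.NewServer(grpc.ForceServerCodecV2(jsonCodecV2{}))
    }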
@@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption {
 	})
 }
 
-// RecvBufferPool returns a ServerOption that configures the server
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: StatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
-	return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.recvBufferPool = bufferPool
+		o.bufferPool = bufferPool
 	})
 }
 
@@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
 // workload (assuming a QPS of a few thousand requests/sec).
 const serverWorkerResetThreshold = 1 << 16
 
-// serverWorkers blocks on a *transport.Stream channel forever and waits for
+// serverWorker blocks on a *transport.Stream channel forever and waits for
 // data to be fed by serveStreams. This allows multiple requests to be
 // processed by the same goroutine, removing the need for expensive stack
 // re-allocations (see the runtime.morestack problem [1]).
@@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		ChannelzParent: s.channelz,
 		MaxHeaderListSize: s.opts.maxHeaderListSize,
 		HeaderTableSize: s.opts.headerTableSize,
+		BufferPool: s.opts.bufferPool,
 	}
 	st, err := transport.NewServerTransport(c, config)
 	if err != nil {
@@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil)
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
 	if err != nil {
 		// Errors returned from transport.NewServerHandlerTransport have
 		// already been written to w.
@@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport,
 		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
 		return err
 	}
-	compData, err := compress(data, cp, comp)
+
+	compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
 	if err != nil {
+		data.Free()
 		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
 		return err
 	}
-	hdr, payload := msgHeader(data, compData)
+
+	hdr, payload := msgHeader(data, compData, pf)
+
+	defer func() {
+		compData.Free()
+		data.Free()
+		// payload does not need to be freed here, it is either data or compData, both of
+		// which are already freed.
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > s.opts.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+	if payloadLen > s.opts.maxSendMessageSize {
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. 
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 9da8fc802..2671c5ef6 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -26,6 +26,7 @@ import (
 	"time"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
@@ -163,9 +164,11 @@ type jsonSC struct {
 }
 
 func init() {
-	internal.ParseServiceConfig = parseServiceConfig
+	internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
+		return parseServiceConfig(js, defaultMaxCallAttempts)
+	}
 }
 
-func parseServiceConfig(js string) *serviceconfig.ParseResult {
+func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
 	if len(js) == 0 {
 		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
 	}
@@ -183,12 +186,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 	}
 	c := rsc.LoadBalancingConfig
 	if c == nil {
-		name := PickFirstBalancerName
+		name := pickfirst.Name
 		if rsc.LoadBalancingPolicy != nil {
 			name = *rsc.LoadBalancingPolicy
 		}
 		if balancer.Get(name) == nil {
-			name = PickFirstBalancerName
+			name = pickfirst.Name
 		}
 		cfg := []map[string]any{{name: struct{}{}}}
 		strCfg, err := json.Marshal(cfg)
@@ -218,7 +221,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 			WaitForReady: m.WaitForReady,
 			Timeout: (*time.Duration)(m.Timeout),
 		}
-		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
 			logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
 			return &serviceconfig.ParseResult{Err: err}
 		}
@@ -264,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 	return &serviceconfig.ParseResult{Config: &sc}
 }
 
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
+func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
 	if jrp == nil {
 		return nil, nil
 	}
@@ -278,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
 		return nil, nil
 	}
 
+	if jrp.MaxAttempts < maxAttempts {
+		maxAttempts = jrp.MaxAttempts
+	}
 	rp := &internalserviceconfig.RetryPolicy{
-		MaxAttempts: jrp.MaxAttempts,
+		MaxAttempts: maxAttempts,
 		InitialBackoff: time.Duration(jrp.InitialBackoff),
 		MaxBackoff: time.Duration(jrp.MaxBackoff),
 		BackoffMultiplier: jrp.BackoffMultiplier,
 		RetryableStatusCodes: make(map[codes.Code]bool),
 	}
-	if rp.MaxAttempts > 5 {
-		// TODO(retry): Make the max maxAttempts configurable.
-		rp.MaxAttempts = 5
-	}
 	for _, code := range jrp.RetryableStatusCodes {
 		rp.RetryableStatusCodes[code] = true
 	}
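For context: the hard-coded cap of 5 retry attempts is replaced by a maxAttempts parameter threaded in from the parser (defaultMaxCallAttempts at the default call site above). A hedged sketch of a service config that exercises the clamp; the service name and timing values are illustrative:

    package main

    import (
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func dial() (*grpc.ClientConn, error) {
    	// "maxAttempts": 7 is honored only up to the configured cap
    	// (5 by default), per convertRetryPolicy above.
    	return grpc.NewClient("dns:///example.com:443",
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    		grpc.WithDefaultServiceConfig(`{
    		  "methodConfig": [{
    		    "name": [{"service": "echo.Echo"}],
    		    "retryPolicy": {
    		      "maxAttempts": 7,
    		      "initialBackoff": "0.1s",
    		      "maxBackoff": "1s",
    		      "backoffMultiplier": 2.0,
    		      "retryableStatusCodes": ["UNAVAILABLE"]
    		    }
    		  }]
    		}`))
    }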
diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go
deleted file mode 100644
index 48a64cfe8..000000000
--- a/vendor/google.golang.org/grpc/shared_buffer_pool.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import "sync"
-
-// SharedBufferPool is a pool of buffers that can be shared, resulting in
-// decreased memory allocation. Currently, in gRPC-go, it is only utilized
-// for parsing incoming messages.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type SharedBufferPool interface {
-	// Get returns a buffer with specified length from the pool.
-	//
-	// The returned byte slice may be not zero initialized.
-	Get(length int) []byte
-
-	// Put returns a buffer to the pool.
-	Put(*[]byte)
-}
-
-// NewSharedBufferPool creates a simple SharedBufferPool with buckets
-// of different sizes to optimize memory usage. This prevents the pool from
-// wasting large amounts of memory, even when handling messages of varying sizes.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NewSharedBufferPool() SharedBufferPool {
-	return &simpleSharedBufferPool{
-		pools: [poolArraySize]simpleSharedBufferChildPool{
-			newBytesPool(level0PoolMaxSize),
-			newBytesPool(level1PoolMaxSize),
-			newBytesPool(level2PoolMaxSize),
-			newBytesPool(level3PoolMaxSize),
-			newBytesPool(level4PoolMaxSize),
-			newBytesPool(0),
-		},
-	}
-}
-
-// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
-type simpleSharedBufferPool struct {
-	pools [poolArraySize]simpleSharedBufferChildPool
-}
-
-func (p *simpleSharedBufferPool) Get(size int) []byte {
-	return p.pools[p.poolIdx(size)].Get(size)
-}
-
-func (p *simpleSharedBufferPool) Put(bs *[]byte) {
-	p.pools[p.poolIdx(cap(*bs))].Put(bs)
-}
-
-func (p *simpleSharedBufferPool) poolIdx(size int) int {
-	switch {
-	case size <= level0PoolMaxSize:
-		return level0PoolIdx
-	case size <= level1PoolMaxSize:
-		return level1PoolIdx
-	case size <= level2PoolMaxSize:
-		return level2PoolIdx
-	case size <= level3PoolMaxSize:
-		return level3PoolIdx
-	case size <= level4PoolMaxSize:
-		return level4PoolIdx
-	default:
-		return levelMaxPoolIdx
-	}
-}
-
-const (
-	level0PoolMaxSize = 16                     // 16 B
-	level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
-	level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
-	level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
-	level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
-)
-
-const (
-	level0PoolIdx = iota
-	level1PoolIdx
-	level2PoolIdx
-	level3PoolIdx
-	level4PoolIdx
-	levelMaxPoolIdx
-	poolArraySize
-)
-
-type simpleSharedBufferChildPool interface {
-	Get(size int) []byte
-	Put(any)
-}
-
-type bufferPool struct {
-	sync.Pool
-
-	defaultSize int
-}
-
-func (p *bufferPool) Get(size int) []byte {
-	bs := p.Pool.Get().(*[]byte)
-
-	if cap(*bs) < size {
-		p.Pool.Put(bs)
-
-		return make([]byte, size)
-	}
-
-	return (*bs)[:size]
-}
-
-func newBytesPool(size int) simpleSharedBufferChildPool {
-	return &bufferPool{
-		Pool: sync.Pool{
-			New: func() any {
-				bs := make([]byte, size)
-				return &bs
-			},
-		},
-		defaultSize: size,
-	}
-}
-
-// nopBufferPool is a buffer pool just makes new buffer without pooling.
-type nopBufferPool struct {
-}
-
-func (nopBufferPool) Get(length int) []byte {
-	return make([]byte, length)
-}
-
-func (nopBufferPool) Put(*[]byte) {
-}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index fdb0bd651..71195c494 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -77,9 +77,6 @@ type InPayload struct {
 	// the call to HandleRPC which provides the InPayload returns and must be
 	// copied if needed later.
 	Payload any
-	// Data is the serialized message payload.
-	// Deprecated: Data will be removed in the next release.
-	Data []byte
 
 	// Length is the size of the uncompressed payload data. Does not include any
 	// framing (gRPC or HTTP/2).
@@ -150,9 +147,6 @@ type OutPayload struct {
 	// the call to HandleRPC which provides the OutPayload returns and must be
 	// copied if needed later.
 	Payload any
-	// Data is the serialized message payload.
-	// Deprecated: Data will be removed in the next release.
-	Data []byte
 	// Length is the size of the uncompressed payload data. Does not include any
 	// framing (gRPC or HTTP/2).
 	Length int
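For context: any stats handler that read the removed Data field must switch to the size fields, which this update keeps populating. A minimal stats.Handler sketch under that assumption:

    package main

    import (
    	"context"
    	"log"

    	"google.golang.org/grpc/stats"
    )

    type payloadSizes struct{}

    func (payloadSizes) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
    func (payloadSizes) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
    func (payloadSizes) HandleConn(context.Context, stats.ConnStats)                       {}

    func (payloadSizes) HandleRPC(_ context.Context, s stats.RPCStats) {
    	// The serialized payload itself is no longer exposed; only its sizes are.
    	if p, ok := s.(*stats.InPayload); ok {
    		log.Printf("recv: %d bytes uncompressed, %d compressed, %d on the wire",
    			p.Length, p.CompressedLength, p.WireLength)
    	}
    }

Registered as before via grpc.StatsHandler(payloadSizes{}) on a server, or grpc.WithStatsHandler on a client.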
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index b54563e81..bb2b2a216 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"io"
 	"math"
+	"math/rand"
 	"strconv"
 	"sync"
 	"time"
@@ -34,13 +35,13 @@ import (
 	"google.golang.org/grpc/internal/balancerload"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/internal/grpcutil"
 	imetadata "google.golang.org/grpc/internal/metadata"
 	iresolver "google.golang.org/grpc/internal/resolver"
 	"google.golang.org/grpc/internal/serviceconfig"
 	istatus "google.golang.org/grpc/internal/status"
 	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn
 		cs.attempt = a
 		return nil
 	}
-	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
 		return nil, err
 	}
 
@@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error {
 	}
 	a.s = s
 	a.ctx = s.Context()
-	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
+	a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
 	return nil
 }
 
@@ -566,10 +567,15 @@ type clientStream struct {
 	// place where we need to check if the attempt is nil.
 	attempt *csAttempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
-	committed  bool // active attempt committed for retry?
-	onCommit   func()
-	buffer     []func(a *csAttempt) error // operations to replay on retry
-	bufferSize int                        // current size of buffer
+	committed        bool // active attempt committed for retry?
+	onCommit         func()
+	replayBuffer     []replayOp // operations to replay on retry
+	replayBufferSize int        // current size of replayBuffer
+}
+
+type replayOp struct {
+	op      func(a *csAttempt) error
+	cleanup func()
 }
 
 // csAttempt implements a single transport stream attempt within a
@@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() {
 		cs.onCommit()
 	}
 	cs.committed = true
-	cs.buffer = nil
+	for _, op := range cs.replayBuffer {
+		if op.cleanup != nil {
+			op.cleanup()
+		}
+	}
+	cs.replayBuffer = nil
 }
 
 func (cs *clientStream) commitAttempt() {
@@ -699,7 +710,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
 		if max := float64(rp.MaxBackoff); cur > max {
 			cur = max
 		}
-		dur = time.Duration(grpcrand.Int63n(int64(cur)))
+		dur = time.Duration(rand.Int63n(int64(cur)))
 		cs.numRetriesSincePushback++
 	}
 
@@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
 		// the stream is canceled.
 		return err
 	}
-	// Note that the first op in the replay buffer always sets cs.attempt
+	// Note that the first op in replayBuffer always sets cs.attempt
 	// if it is able to pick a transport and create a stream.
 	if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
 		return nil
@@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
 			// already be status errors.
 			return toRPCErr(op(cs.attempt))
 		}
-		if len(cs.buffer) == 0 {
+		if len(cs.replayBuffer) == 0 {
 			// For the first op, which controls creation of the stream and
 			// assigns cs.attempt, we need to create a new attempt inline
 			// before executing the first op.  On subsequent ops, the attempt
@@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD {
 }
 
 func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
-	for _, f := range cs.buffer {
-		if err := f(attempt); err != nil {
+	for _, f := range cs.replayBuffer {
+		if err := f.op(attempt); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
 	// Note: we still will buffer if retry is disabled (for transparent retries).
 	if cs.committed {
 		return
 	}
-	cs.bufferSize += sz
-	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+	cs.replayBufferSize += sz
+	if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
 		cs.commitAttemptLocked()
+		cleanup()
 		return
 	}
-	cs.buffer = append(cs.buffer, op)
+	cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
 }
 
 func (cs *clientStream) SendMsg(m any) (err error) {
@@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
 	}
 
 	// load hdr, payload, data
-	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > *cs.callInfo.maxSendMessageSize {
%d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. + onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. 
@@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	cs := a.cs
 	if len(a.statsHandlers) != 0 && payInfo == nil {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
 
 	if !a.decompSet {
@@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 		// Only initialize this state once per stream.
 		a.decompSet = true
 	}
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
-	if err != nil {
+	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
 		if err == io.EOF {
 			if statusErr := a.s.Status().Err(); statusErr != nil {
 				return statusErr
@@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	}
 	for _, sh := range a.statsHandlers {
 		sh.HandleRPC(a.ctx, &stats.InPayload{
-			Client:   true,
-			RecvTime: time.Now(),
-			Payload:  m,
-			// TODO truncate large payload.
-			Data:             payInfo.uncompressedBytes,
+			Client:           true,
+			RecvTime:         time.Now(),
+			Payload:          m,
 			WireLength:       payInfo.compressedLength + headerLen,
 			CompressedLength: payInfo.compressedLength,
-			Length:           len(payInfo.uncompressedBytes),
+			Length:           payInfo.uncompressedBytes.Len(),
 		})
 	}
 	if channelz.IsOn() {
@@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	}
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
-	if err == nil {
-		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-	}
-	if err == io.EOF {
+	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
 		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
 	}
-	return toRPCErr(err)
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
 }
 
 func (a *csAttempt) finish(err error) {
@@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) {
 	a.mu.Unlock()
 }
 
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
 // given addrConn.
 //
 // It's expected that the given transport is either the same one in addrConn, or
 // is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
 //
 // Main difference between this and ClientConn.NewStream:
 // - no retry
@@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string
 		return nil, err
 	}
 	as.s = s
-	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
+	as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
 		// Listen on stream context to cleanup when the stream context is
@@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
 	}
 
 	// load hdr, payload, data
-	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payld) > *as.callInfo.maxSendMessageSize {
%d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. 
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
index 8b813529c..0037fee0b 100644
--- a/vendor/google.golang.org/grpc/stream_interfaces.go
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -22,15 +22,35 @@ package grpc
 // request, many responses) RPC. It is generic over the type of the response
 // message. It is used in generated code.
 type ServerStreamingClient[Res any] interface {
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream. If
+	// io.EOF is returned, the stream has terminated with an OK status. Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
 	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality. No other methods in the ClientStream should be called
+	// directly.
 	ClientStream
 }
 
 // ServerStreamingServer represents the server side of a server-streaming (one
 // request, many responses) RPC. It is generic over the type of the response
 // message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
 type ServerStreamingServer[Res any] interface {
+	// Send sends a response message to the client. The server handler may
+	// call Send multiple times to send multiple messages to the client. An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
 	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality. No other methods in the ServerStream should
+	// be called directly.
 	ServerStream
 }
 
@@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface {
 // message stream and the type of the unary response message. It is used in
 // generated code.
 type ClientStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server. The client may call Send
+	// multiple times to send multiple messages to the server. On error, Send
+	// aborts the stream. If the error was generated by the client, the status
+	// is returned directly. Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using CloseAndRecv().
 	Send(*Req) error
+
+	// CloseAndRecv closes the request stream and waits for the server's
+	// response. This method must be called once and only once after sending
+	// all request messages. Any error returned is implemented by the status
+	// package.
 	CloseAndRecv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality. No other methods in the ClientStream should be called
+	// directly.
 	ClientStream
 }
@@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface {
 // requests, one response) RPC. It is generic over both the type of the request
 // message stream and the type of the unary response message. It is used in
 // generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
 type ClientStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client. The server may
+	// repeatedly call Recv to read messages from the request stream. If
+	// io.EOF is returned, it indicates the client called CloseAndRecv on its
+	// ClientStreamingClient. Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
 	Recv() (*Req, error)
+
+	// SendAndClose sends a single response message to the client and closes
+	// the stream. This method must be called once and only once after all
+	// request messages have been processed. Recv should not be called after
+	// calling SendAndClose.
 	SendAndClose(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality. No other methods in the ServerStream should
+	// be called directly.
 	ServerStream
 }
 
@@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface {
 // request message stream and the type of the response message stream. It is
 // used in generated code.
 type BidiStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server. The client may call Send
+	// multiple times to send multiple messages to the server. On error, Send
+	// aborts the stream. If the error was generated by the client, the status
+	// is returned directly. Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using Recv().
 	Send(*Req) error
+
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream. If
+	// io.EOF is returned, the stream has terminated with an OK status. Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
 	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, Trailer, and
+	// CloseSend functionality. No other methods in the ClientStream should be
+	// called directly.
 	ClientStream
 }
 
@@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface {
 // (many requests, many responses) RPC. It is generic over both the type of the
 // request message stream and the type of the response message stream. It is
 // used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
 type BidiStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client. The server may
+	// repeatedly call Recv to read messages from the request stream. If
+	// io.EOF is returned, it indicates the client called CloseSend on its
+	// BidiStreamingClient. Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
 	Recv() (*Req, error)
+
+	// Send sends a response message to the client. The server handler may
+	// call Send multiple times to send multiple messages to the client. An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
 	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality. No other methods in the ServerStream should
+	// be called directly.
 	ServerStream
 }
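For context: a sketch of a handler written against the now-documented generic interface, following the contract above (the message types stand in for generated code):

    package main

    import (
    	"io"

    	"google.golang.org/grpc"
    )

    // Num and Total stand in for generated message types.
    type Num struct{ Value int64 }
    type Total struct{ Value int64 }

    // Sum reads requests until io.EOF (the client's CloseAndRecv), then calls
    // SendAndClose exactly once and returns nil for an OK status.
    func Sum(stream grpc.ClientStreamingServer[Num, Total]) error {
    	var total int64
    	for {
    		n, err := stream.Recv()
    		if err == io.EOF {
    			return stream.SendAndClose(&Total{Value: total})
    		}
    		if err != nil {
    			return err // stream unusable; the handler should return
    		}
    		total += n.Value
    	}
    }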
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index a0b782890..187fbf119 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@ package grpc
 
 // Version is the current grpc version.
-const Version = "1.64.1"
+const Version = "1.67.0"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ab9ecb248..dfff6f926 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# dario.cat/mergo v1.0.0
+# dario.cat/mergo v1.0.1
 ## explicit; go 1.13
 dario.cat/mergo
 # github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
@@ -18,8 +18,8 @@ github.com/Microsoft/go-winio/internal/socket
 github.com/Microsoft/go-winio/internal/stringbuffer
 github.com/Microsoft/go-winio/pkg/guid
 github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.12.5
-## explicit; go 1.21
+# github.com/Microsoft/hcsshim v0.12.9
+## explicit; go 1.22
 github.com/Microsoft/hcsshim
 github.com/Microsoft/hcsshim/computestorage
 github.com/Microsoft/hcsshim/internal/cow
@@ -66,11 +66,11 @@ github.com/blang/semver/v4
 # github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/checkpoint-restore/checkpointctl v1.2.1
+# github.com/checkpoint-restore/checkpointctl v1.3.0
 ## explicit; go 1.21
 github.com/checkpoint-restore/checkpointctl/lib
-# github.com/checkpoint-restore/go-criu/v7 v7.1.0
-## explicit; go 1.18
+# github.com/checkpoint-restore/go-criu/v7 v7.2.0
+## explicit; go 1.20
 github.com/checkpoint-restore/go-criu/v7
 github.com/checkpoint-restore/go-criu/v7/rpc
 github.com/checkpoint-restore/go-criu/v7/stats
@@ -80,23 +80,26 @@ github.com/chzyer/readline
 # github.com/containerd/cgroups/v3 v3.0.3
 ## explicit; go 1.18
 github.com/containerd/cgroups/v3/cgroup1/stats
-# github.com/containerd/containerd v1.7.18
-## explicit; go 1.21
-github.com/containerd/containerd/errdefs
-github.com/containerd/containerd/pkg/userns
-github.com/containerd/containerd/platforms
-# github.com/containerd/errdefs v0.1.0
+# github.com/containerd/errdefs v0.3.0
 ## explicit; go 1.20
 github.com/containerd/errdefs
+# github.com/containerd/errdefs/pkg v0.3.0
+## explicit; go 1.22
+github.com/containerd/errdefs/pkg/errgrpc
+github.com/containerd/errdefs/pkg/internal/cause
+github.com/containerd/errdefs/pkg/internal/types
 # github.com/containerd/log v0.1.0
 ## explicit; go 1.20
 github.com/containerd/log
+# github.com/containerd/platforms v0.2.1
+## explicit; go 1.20
+github.com/containerd/platforms
 # github.com/containerd/stargz-snapshotter/estargz v0.15.1
 ## explicit; go 1.19
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containerd/typeurl/v2 v2.1.1
-## explicit; go 1.13
+# github.com/containerd/typeurl/v2 v2.2.0
+## explicit; go 1.21
 github.com/containerd/typeurl/v2
 # github.com/containernetworking/cni v1.2.3
 ## explicit; go 1.21
@@ -113,8 +116,8 @@ github.com/containernetworking/cni/pkg/version
 # github.com/containernetworking/plugins v1.5.1
 ## explicit; go 1.20
 github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.37.5
-## explicit; go 1.21.0
+# github.com/containers/buildah v1.38.0
+## explicit; go 1.22.6
 github.com/containers/buildah
 github.com/containers/buildah/bind
 github.com/containers/buildah/chroot
@@ -131,6 +134,7 @@ github.com/containers/buildah/internal/sbom
 github.com/containers/buildah/internal/tmpdir
 github.com/containers/buildah/internal/util
 github.com/containers/buildah/internal/volumes
+github.com/containers/buildah/pkg/binfmt
 github.com/containers/buildah/pkg/blobcache
 github.com/containers/buildah/pkg/chrootuser
 github.com/containers/buildah/pkg/jail
@@ -139,9 +143,10 @@ github.com/containers/buildah/pkg/parse
 github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
+github.com/containers/buildah/pkg/volumes
 github.com/containers/buildah/util
-# github.com/containers/common v0.60.4
-## explicit; go 1.21.0
+# github.com/containers/common v0.61.0
+## explicit; go 1.22.6
 github.com/containers/common/internal
 github.com/containers/common/internal/attributedstring
 github.com/containers/common/libimage
@@ -207,8 +212,8 @@ github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 ## explicit
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.32.2
-## explicit; go 1.21.0
+# github.com/containers/image/v5 v5.33.0
+## explicit; go 1.22.6
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath
@@ -278,7 +283,7 @@ github.com/containers/image/v5/version
 # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
 ## explicit
 github.com/containers/libtrust
-# github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c
+# github.com/containers/luksy v0.0.0-20241007190014-e2530d691420
 ## explicit; go 1.20
 github.com/containers/luksy
 # github.com/containers/ocicrypt v1.2.0
@@ -297,8 +302,8 @@ github.com/containers/ocicrypt/keywrap/pkcs7
 github.com/containers/ocicrypt/spec
 github.com/containers/ocicrypt/utils
 github.com/containers/ocicrypt/utils/keyprovider
-# github.com/containers/podman/v5 v5.2.5
-## explicit; go 1.21.0
+# github.com/containers/podman/v5 v5.3.0
+## explicit; go 1.22.6
 github.com/containers/podman/v5/cmd/podman/parse
 github.com/containers/podman/v5/cmd/podman/registry
 github.com/containers/podman/v5/libpod
@@ -393,8 +398,8 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.55.1
-## explicit; go 1.21
+# github.com/containers/storage v1.56.0
+## explicit; go 1.22.0
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -454,8 +459,8 @@ github.com/coreos/go-systemd/v22/sdjournal
 # github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f
 ## explicit
 github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
-# github.com/cyphar/filepath-securejoin v0.3.1
-## explicit; go 1.20
+# github.com/cyphar/filepath-securejoin v0.3.4
+## explicit; go 1.21
 github.com/cyphar/filepath-securejoin
 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
@@ -471,7 +476,7 @@ github.com/distribution/reference
 github.com/docker/distribution/registry/api/errcode
 github.com/docker/distribution/registry/api/v2
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v27.1.1+incompatible
+# github.com/docker/docker v27.3.1+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -517,7 +522,7 @@ github.com/docker/docker-credential-helpers/credentials
 github.com/docker/go-connections/nat
 github.com/docker/go-connections/sockets
 github.com/docker/go-connections/tlsconfig
-# github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651
+# github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8
 ## explicit
 github.com/docker/go-plugins-helpers/sdk
 github.com/docker/go-plugins-helpers/volume
@@ -527,13 +532,14 @@ github.com/docker/go-units
 # github.com/felixge/httpsnoop v1.0.4
 ## explicit; go 1.13
 github.com/felixge/httpsnoop
-# github.com/fsnotify/fsnotify v1.7.0
+# github.com/fsnotify/fsnotify v1.8.0
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.11.1
-## explicit; go 1.21
+github.com/fsnotify/fsnotify/internal
+# github.com/fsouza/go-dockerclient v1.12.0
+## explicit; go 1.22
 github.com/fsouza/go-dockerclient
-# github.com/go-jose/go-jose/v4 v4.0.2
+# github.com/go-jose/go-jose/v4 v4.0.4
 ## explicit; go 1.21
 github.com/go-jose/go-jose/v4
 github.com/go-jose/go-jose/v4/cipher
@@ -585,8 +591,8 @@ github.com/go-openapi/validate
 # github.com/go-task/slim-sprig/v3 v3.0.0
 ## explicit; go 1.20
 github.com/go-task/slim-sprig/v3
-# github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466
-## explicit; go 1.12
+# github.com/godbus/dbus/v5 v5.1.1-0.20240921181615-a817f3cc4a9e
+## explicit; go 1.20
 github.com/godbus/dbus/v5
 # github.com/gogo/protobuf v1.3.2
 ## explicit; go 1.15
@@ -604,7 +610,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.20.0
+# github.com/google/go-containerregistry v0.20.2
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/name
 github.com/google/go-containerregistry/pkg/v1
@@ -652,8 +658,11 @@ github.com/jpillora/backoff
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.9
-## explicit; go 1.20
+# github.com/kevinburke/ssh_config v1.2.0
+## explicit
+github.com/kevinburke/ssh_config
+# github.com/klauspost/compress v1.17.11
+## explicit; go 1.21
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -668,8 +677,8 @@ github.com/klauspost/pgzip
 # github.com/kr/fs v0.1.0
 ## explicit
 github.com/kr/fs
-# github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0
-## explicit; go 1.21
+# github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec
+## explicit; go 1.22.0
 github.com/letsencrypt/boulder/core
 github.com/letsencrypt/boulder/goodkey
 github.com/letsencrypt/boulder/identifier
@@ -692,7 +701,7 @@ github.com/mattn/go-runewidth
 # github.com/mattn/go-shellwords v1.0.12
 ## explicit; go 1.13
 github.com/mattn/go-shellwords
-# github.com/mattn/go-sqlite3 v1.14.22
+# github.com/mattn/go-sqlite3 v1.14.24
 ## explicit; go 1.19
 github.com/mattn/go-sqlite3
 # github.com/mdlayher/socket v0.4.1
@@ -710,8 +719,8 @@ github.com/mistifyio/go-zfs/v3
 # github.com/mitchellh/mapstructure v1.5.0
 ## explicit; go 1.14
 github.com/mitchellh/mapstructure
-# github.com/moby/buildkit v0.12.5
-## explicit; go 1.20
+# github.com/moby/buildkit v0.17.1
+## explicit; go 1.22.0
 github.com/moby/buildkit/frontend/dockerfile/command
 github.com/moby/buildkit/frontend/dockerfile/parser
 github.com/moby/buildkit/frontend/dockerfile/shell
@@ -722,15 +731,21 @@ github.com/moby/docker-image-spec/specs-go/v1
 # github.com/moby/patternmatcher v0.6.0
 ## explicit; go 1.19
 github.com/moby/patternmatcher
+# github.com/moby/sys/capability v0.3.0
+## explicit; go 1.21
+github.com/moby/sys/capability
 # github.com/moby/sys/mountinfo v0.7.2
 ## explicit; go 1.17
 github.com/moby/sys/mountinfo
-# github.com/moby/sys/sequential v0.5.0
+# github.com/moby/sys/sequential v0.6.0
 ## explicit; go 1.17
 github.com/moby/sys/sequential
 # github.com/moby/sys/user v0.3.0
 ## explicit; go 1.17
 github.com/moby/sys/user
+# github.com/moby/sys/userns v0.1.0
+## explicit; go 1.21
+github.com/moby/sys/userns
 # github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
@@ -801,7 +816,7 @@ github.com/opencontainers/go-digest
 ## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.13 => github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0
+# github.com/opencontainers/runc v1.2.1 => github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0
 ## explicit; go 1.20
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/cgroups
@@ -816,21 +831,22 @@ github.com/opencontainers/runc/libcontainer/utils
 # github.com/opencontainers/runtime-spec v1.2.0
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
+# github.com/opencontainers/runtime-tools v0.9.1-0.20241001195557-6c9570a1678f
 ## explicit; go 1.19
 github.com/opencontainers/runtime-tools/generate
 github.com/opencontainers/runtime-tools/generate/seccomp
 github.com/opencontainers/runtime-tools/validate/capabilities
-# github.com/opencontainers/selinux v1.11.0
+# github.com/opencontainers/selinux v1.11.1
 ## explicit; go 1.19
 github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/imagebuilder v1.2.14
-## explicit; go 1.19
+# github.com/openshift/imagebuilder v1.2.15
+## explicit; go 1.21.0
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerfile/command
 github.com/openshift/imagebuilder/dockerfile/parser
+github.com/openshift/imagebuilder/internal
 github.com/openshift/imagebuilder/signal
 github.com/openshift/imagebuilder/strslice
 # github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f
@@ -840,10 +856,13 @@ github.com/ostreedev/ostree-go/pkg/otbuiltin
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/pkg/sftp v1.13.6
+# github.com/pkg/sftp v1.13.7
 ## explicit; go 1.15
 github.com/pkg/sftp
 github.com/pkg/sftp/internal/encoding/ssh/filexfer
+# github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
+## explicit; go 1.20
+github.com/planetscale/vtprotobuf/protohelpers
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
@@ -886,14 +905,14 @@ github.com/seccomp/libseccomp-golang
 # github.com/secure-systems-lab/go-securesystemslib v0.8.0
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/encrypted
-# github.com/sigstore/fulcio v1.4.5
-## explicit; go 1.21
+# github.com/sigstore/fulcio v1.6.4
+## explicit; go 1.22.6
 github.com/sigstore/fulcio/pkg/certificate
 # github.com/sigstore/rekor v1.3.6
github.com/sigstore/rekor/pkg/generated/models
-# github.com/sigstore/sigstore v1.8.4
-## explicit; go 1.21
+# github.com/sigstore/sigstore v1.8.9
+## explicit; go 1.22.5
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/signature
github.com/sigstore/sigstore/pkg/signature/options
@@ -901,6 +920,9 @@ github.com/sigstore/sigstore/pkg/signature/payload
# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
+# github.com/skeema/knownhosts v1.3.0
+## explicit; go 1.17
+github.com/skeema/knownhosts
# github.com/spf13/cobra v1.8.1
## explicit; go 1.15
github.com/spf13/cobra
@@ -910,8 +932,8 @@ github.com/spf13/pflag
# github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
## explicit; go 1.19
github.com/stefanberger/go-pkcs11uri
-# github.com/sylabs/sif/v2 v2.18.0
-## explicit; go 1.21.0
+# github.com/sylabs/sif/v2 v2.19.1
+## explicit; go 1.22.5
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
@@ -928,26 +950,26 @@ github.com/ulikunitz/xz
github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
github.com/ulikunitz/xz/lzma
-# github.com/vbatts/tar-split v0.11.5
+# github.com/vbatts/tar-split v0.11.6
## explicit; go 1.17
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v8 v8.7.5
+# github.com/vbauerster/mpb/v8 v8.8.3
## explicit; go 1.17
github.com/vbauerster/mpb/v8
github.com/vbauerster/mpb/v8/cwriter
github.com/vbauerster/mpb/v8/decor
github.com/vbauerster/mpb/v8/internal
-# github.com/vishvananda/netlink v1.2.1-beta.2
+# github.com/vishvananda/netlink v1.3.0
## explicit; go 1.12
github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl
# github.com/vishvananda/netns v0.0.4
## explicit; go 1.17
github.com/vishvananda/netns
-# go.etcd.io/bbolt v1.3.10
-## explicit; go 1.21
+# go.etcd.io/bbolt v1.3.11
+## explicit; go 1.22
go.etcd.io/bbolt
# go.mongodb.org/mongo-driver v1.14.0
## explicit; go 1.18
@@ -968,12 +990,13 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
-## explicit; go 1.20
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
+## explicit; go 1.21
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel v1.28.0
+## explicit; go 1.21
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/baggage
@@ -984,15 +1007,16 @@ go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.20.0
-# go.opentelemetry.io/otel/metric v1.24.0
-## explicit; go 1.20
+go.opentelemetry.io/otel/semconv/v1.24.0
+# go.opentelemetry.io/otel/metric v1.28.0
+## explicit; go 1.21
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/trace v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel/trace v1.28.0
+## explicit; go 1.21
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
-# golang.org/x/crypto v0.28.0
+# golang.org/x/crypto v0.29.0
## explicit; go 1.20
golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt
@@ -1022,8 +1046,8 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
golang.org/x/crypto/twofish
golang.org/x/crypto/xts
-# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
-## explicit; go 1.20
+# golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
+## explicit; go 1.22.0
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
@@ -1051,21 +1075,21 @@ golang.org/x/net/trace
golang.org/x/oauth2
golang.org/x/oauth2/clientcredentials
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.8.0
+# golang.org/x/sync v0.9.0
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.26.0
+# golang.org/x/sys v0.27.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.25.0
+# golang.org/x/term v0.26.0
## explicit; go 1.18
golang.org/x/term
-# golang.org/x/text v0.19.0
+# golang.org/x/text v0.20.0
## explicit; go 1.18
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
@@ -1087,24 +1111,25 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.5.0
+# golang.org/x/time v0.6.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.26.0
## explicit; go 1.22.0
golang.org/x/tools/cover
golang.org/x/tools/go/ast/inspector
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1
+## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.64.1
-## explicit; go 1.19
+# google.golang.org/grpc v1.67.0
+## explicit; go 1.21
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/pickfirst
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -1114,7 +1139,9 @@ google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancer/gracefulswitch
@@ -1125,7 +1152,6 @@ google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/credentials
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/grpclog
-google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/idle
@@ -1137,11 +1163,13 @@ google.golang.org/grpc/internal/resolver/dns/internal
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/resolver/unix
google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
google.golang.org/grpc/internal/status
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/internal/transport/networktype
google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
google.golang.org/grpc/metadata
google.golang.org/grpc/peer
google.golang.org/grpc/resolver